Merge remote-tracking branch 'origin/master' into additional-vo
Yiannis128 committed Nov 6, 2024
2 parents 4b2fe6f + dcb49ca commit 02932c8
Showing 11 changed files with 135 additions and 62 deletions.
60 changes: 48 additions & 12 deletions .github/workflows/workflow.yml
@@ -2,18 +2,46 @@ name: Checking
on: push

jobs:
setup-requirements:
name: Get Requirements
runs-on: ubuntu-latest
timeout-minutes: 10

steps:
# Setup Python (faster than using Python container)
- name: Setup Python
uses: actions/setup-python@v5.3.0
with:
python-version: "3.11"

- name: Check out repository code
uses: actions/checkout@v4.2.2

- name: Install Hatch
run: python -m pip install --upgrade hatch

- name: Generate Requirements
run: python -m hatch dep show requirements > requirements.txt

- name: Upload Requirements
uses: actions/upload-artifact@v4.4.3
with:
name: requirements
path: requirements.txt


pylint:
name: PyLint
runs-on: ubuntu-latest
timeout-minutes: 10

steps:
- name: Check out repository code
uses: actions/checkout@v3
uses: actions/checkout@v4.2.2

# Setup Python (faster than using Python container)
- name: Setup Python
uses: actions/setup-python@v4
uses: actions/setup-python@v5.3.0
with:
python-version: "3.11"

@@ -31,33 +59,41 @@ jobs:
test:
name: PyTest
needs: setup-requirements
runs-on: ubuntu-latest
timeout-minutes: 10

steps:
- name: Check out repository code
uses: actions/checkout@v3
uses: actions/checkout@v4.2.2

# Setup Python (faster than using Python container)
- name: Setup Python
uses: actions/setup-python@v4
uses: actions/setup-python@v5.3.0
with:
python-version: "3.11"

- name: Install pipenv
run: |
python -m pip install --upgrade pipenv wheel
- id: cache-pipenv
uses: actions/cache@v1.2.1
- name: Download Requirements
uses: actions/download-artifact@v4.1.8
with:
name: requirements
path: .

- name: Install Environment
run: python -m pip install --upgrade pipenv wheel

- name: Cache Pipenv
id: cache-pipenv
uses: actions/cache@v4.1.2
with:
path: ~/.local/share/virtualenvs
key: ${{ runner.os }}-pipenv-${{ hashFiles('**/Pipfile.lock') }}

- name: Install dependencies
if: steps.cache-pipenv.outputs.cache-hit != 'true'
run: |
pipenv install --deploy --dev
pipenv install -r requirements.txt
pipenv lock
- name: Run test suite
run: |
pipenv run pytest -v
run: pipenv run pytest -v
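The restructured workflow splits dependency resolution into a dedicated `setup-requirements` job that exports the Hatch dependency list as a `requirements` artifact, which the `test` job downloads before installing it into a pipenv environment cached under a key derived from `hashFiles('**/Pipfile.lock')`. The sketch below is a rough Python analogue of that cache key, not the action's actual implementation; the glob pattern and key layout simply mirror the workflow above.

```python
# Rough analogue of actions/cache keyed on hashFiles('**/Pipfile.lock'):
# the key is a stable digest of lockfile contents, so a cache hit means
# the dependencies are unchanged and the install step can be skipped.
import hashlib
from pathlib import Path

def pipenv_cache_key(os_name: str, pattern: str = "**/Pipfile.lock") -> str:
    digest = hashlib.sha256()
    for path in sorted(Path(".").glob(pattern)):  # deterministic order
        digest.update(path.read_bytes())
    return f"{os_name}-pipenv-{digest.hexdigest()}"

print(pipenv_cache_key("Linux"))
```

Pinning the actions to exact releases (checkout@v4.2.2, setup-python@v5.3.0, upload-artifact@v4.4.3) also makes the jobs reproducible against upstream action updates.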
5 changes: 4 additions & 1 deletion .pylintrc
@@ -431,7 +431,10 @@ disable=raw-checker-failed,
use-symbolic-message-instead,
use-implicit-booleaness-not-comparison-to-string,
use-implicit-booleaness-not-comparison-to-zero,
unspecified-encoding
unspecified-encoding,
too-many-arguments,
too-many-positional-arguments,
too-many-instance-attributes

# Enable the message, report, category or checker with the given id(s). You can
# either give multiple identifier separated by comma (,) or put this option
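The three newly disabled messages are pylint's design-complexity checks. A hypothetical illustration of code that trips all three under pylint's default thresholds:

```python
# Hypothetical example: this class exceeds pylint's default limits for
# too-many-arguments, too-many-positional-arguments and
# too-many-instance-attributes, the checks the commit disables.
class VerifierRun:
    def __init__(self, path, timeout, unwind, solver, strategy, verbose,
                 cache, retries):
        self.path = path
        self.timeout = timeout
        self.unwind = unwind
        self.solver = solver
        self.strategy = strategy
        self.verbose = verbose
        self.cache = cache
        self.retries = retries
```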
13 changes: 9 additions & 4 deletions esbmc_ai/__main__.py
@@ -417,10 +417,15 @@ def main() -> None:
if len(str(Config.get_user_chat_initial().content)) > 0:
printv("Using initial prompt from file...\n")
anim.start("Model is parsing ESBMC output... Please Wait")
response = chat.send_message(
message=str(Config.get_user_chat_initial().content),
)
anim.stop()
try:
response = chat.send_message(
message=str(Config.get_user_chat_initial().content),
)
except Exception as e:
print("There was an error while generating a response: {e}")
sys.exit(1)
finally:
anim.stop()

if response.finish_reason == FinishReason.length:
raise RuntimeError(f"The token length is too large: {chat.ai_model.tokens}")
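The change wraps the initial-prompt request so a failure exits cleanly while the spinner is always stopped in `finally`. A self-contained sketch of the same pattern; the `Spinner` class and `send_message` function are stand-ins for the project's `anim` and `chat` objects:

```python
import sys

class Spinner:
    """Stub animation used for illustration."""
    def start(self, text: str) -> None: print(text)
    def stop(self) -> None: print("(spinner stopped)")

def send_message(message: str) -> str:
    return f"reply to: {message}"

anim = Spinner()
anim.start("Model is parsing ESBMC output... Please Wait")
try:
    response = send_message("initial prompt")
except Exception as e:  # mirrors the diff's broad catch
    print(f"There was an error while generating a response: {e}")
    sys.exit(1)
finally:
    anim.stop()  # always runs, even on the error path
print(response)
```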
3 changes: 1 addition & 2 deletions esbmc_ai/ai_models.py
@@ -243,6 +243,7 @@ def is_valid_ai_model(
"""Accepts both the AIModel object and the name as parameter. It checks the
openai servers to see if a model is defined on their servers, if not, then
it checks the internally defined AI models list."""
from openai import Client

# Get the name of the model
name: str = ai_model.name if isinstance(ai_model, AIModel) else ai_model
@@ -251,8 +252,6 @@
# NOTE: This is not tested as no way to mock API currently.
if api_keys and api_keys.openai:
try:
from openai import Client

for model in Client(api_key=api_keys.openai).models.list().data:
if model.id == name:
return True
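Hoisting `from openai import Client` to the top of `is_valid_ai_model` keeps the dependency lazy at module level while flattening the function body. A condensed sketch of the lookup order the function implements; `LOCAL_MODELS` is a hypothetical stand-in for the internal model registry:

```python
# Lookup order: query the OpenAI model list when an API key is
# available, then fall back to the locally defined registry.
from typing import Optional

LOCAL_MODELS = {"gpt-3.5-turbo", "claude-3"}  # assumed registry

def is_valid_ai_model(name: str, openai_key: Optional[str] = None) -> bool:
    if openai_key:
        from openai import Client  # deferred so openai stays optional
        try:
            if any(m.id == name
                   for m in Client(api_key=openai_key).models.list().data):
                return True
        except Exception:
            pass  # fall through to the local list on API errors
    return name in LOCAL_MODELS

print(is_valid_ai_model("claude-3"))  # True via the local registry
```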
50 changes: 26 additions & 24 deletions esbmc_ai/chats/base_chat_interface.py
@@ -1,5 +1,8 @@
# Author: Yiannis Charalambous

"""Contains code for the base class for interacting with the LLMs in a
conversation-based way."""

from abc import abstractmethod
from typing import Optional

@@ -14,7 +17,7 @@
from esbmc_ai.ai_models import AIModel


class BaseChatInterface(object):
class BaseChatInterface:
"""Base class for interacting with an LLM. It allows for interactions with
text generation LLMs and also chat LLMs."""

@@ -32,12 +35,14 @@ def __init__(

@abstractmethod
def compress_message_stack(self) -> None:
"""Compress the message stack, is abstract and needs to be implemented."""
raise NotImplementedError()

def push_to_message_stack(
self,
message: BaseMessage | tuple[BaseMessage, ...] | list[BaseMessage],
) -> None:
"""Pushes a message(s) to the message stack without querying the LLM."""
if isinstance(message, list) or isinstance(message, tuple):
self.messages.extend(list(message))
else:
@@ -85,31 +90,28 @@ def send_message(self, message: Optional[str] = None) -> ChatResponse:
all_messages = self._system_messages.copy()
all_messages.extend(self.messages.copy())

response: ChatResponse
try:
response_message: BaseMessage = self.llm.invoke(input=all_messages)
response_message: BaseMessage = self.llm.invoke(input=all_messages)

self.push_to_message_stack(message=response_message)
self.push_to_message_stack(message=response_message)

# Check if token limit has been exceeded.
all_messages.append(response_message)
new_tokens: int = self.llm.get_num_tokens_from_messages(
messages=all_messages,
)

# Check if token limit has been exceeded.
all_messages.append(response_message)
new_tokens: int = self.llm.get_num_tokens_from_messages(
messages=all_messages,
response: ChatResponse
if new_tokens > self.ai_model.tokens:
response = ChatResponse(
finish_reason=FinishReason.length,
message=response_message,
total_tokens=self.ai_model.tokens,
)
else:
response = ChatResponse(
finish_reason=FinishReason.stop,
message=response_message,
total_tokens=new_tokens,
)
if new_tokens > self.ai_model.tokens:
response = ChatResponse(
finish_reason=FinishReason.length,
message=response_message,
total_tokens=self.ai_model.tokens,
)
else:
response = ChatResponse(
finish_reason=FinishReason.stop,
message=response_message,
total_tokens=new_tokens,
)
except Exception as e:
print(f"There was an unkown error when generating a response: {e}")
exit(1)

return response
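With the broad try/except removed, `send_message` now lets provider errors propagate (they are handled at the call site in `__main__`) and focuses on labelling the response by token usage. The decision it makes, distilled into a runnable sketch with simplified stand-ins for the project's types:

```python
# If the running token count exceeds the model's window, report
# FinishReason.length and clamp the total; otherwise FinishReason.stop.
from dataclasses import dataclass
from enum import Enum, auto

class FinishReason(Enum):
    stop = auto()
    length = auto()

@dataclass
class ChatResponse:
    finish_reason: FinishReason
    total_tokens: int

def classify(new_tokens: int, model_window: int) -> ChatResponse:
    if new_tokens > model_window:
        return ChatResponse(FinishReason.length, model_window)
    return ChatResponse(FinishReason.stop, new_tokens)

print(classify(4097, 4096))  # finish_reason=length, clamped to 4096
```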
3 changes: 3 additions & 0 deletions esbmc_ai/chats/latest_state_solution_generator.py
@@ -1,5 +1,8 @@
# Author: Yiannis Charalambous

"""Contains code that extends the default solution generator to only use the
latest state of the code only (removes history)"""

from typing import Optional
from typing_extensions import override
from langchain_core.messages import BaseMessage
29 changes: 21 additions & 8 deletions esbmc_ai/chats/solution_generator.py
@@ -1,5 +1,7 @@
# Author: Yiannis Charalambous 2023

"""Contains code for automatically repairing code using ESBMC."""

from typing import Optional
from langchain_core.language_models import BaseChatModel
from typing_extensions import override
@@ -10,21 +12,25 @@
from esbmc_ai.solution import SourceFile

from esbmc_ai.ai_models import AIModel
from .base_chat_interface import BaseChatInterface
from esbmc_ai.esbmc_util import ESBMCUtil
from .base_chat_interface import BaseChatInterface


class ESBMCTimedOutException(Exception):
pass
"""Error that means that ESBMC timed out and so the error could not be
determined."""


class SourceCodeParseError(Exception):
pass
"""Error that means that SolutionGenerator could not parse the source code
to return the right format."""


def get_source_code_formatted(
source_code_format: str, source_code: str, esbmc_output: str
) -> str:
"""Gets the formatted output source code, based on the source_code_format
passed."""
match source_code_format:
case "single":
# Get source code error line from esbmc output
@@ -44,11 +50,14 @@


def get_esbmc_output_formatted(esbmc_output_type: str, esbmc_output: str) -> str:
"""Gets the formatted output ESBMC output, based on the esbmc_output_type
passed."""
# Check for parsing error
if "ERROR: PARSING ERROR" in esbmc_output:
# Parsing errors are usually small in nature.
raise SourceCodeParseError()
elif "ERROR: Timed out" in esbmc_output:

if "ERROR: Timed out" in esbmc_output:
raise ESBMCTimedOutException()

match esbmc_output_type:
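The reshaped guard clauses detect each fatal verifier outcome by a marker string and raise a dedicated exception before any formatting happens, so callers can react to parse failures and timeouts separately. Condensed into a runnable sketch (the marker strings are taken from the code above):

```python
class SourceCodeParseError(Exception):
    """ESBMC could not parse the source code."""

class ESBMCTimedOutException(Exception):
    """ESBMC timed out, so no verdict is available."""

def check_esbmc_output(esbmc_output: str) -> str:
    if "ERROR: PARSING ERROR" in esbmc_output:
        raise SourceCodeParseError()
    if "ERROR: Timed out" in esbmc_output:
        raise ESBMCTimedOutException()
    return esbmc_output

print(check_esbmc_output("VERIFICATION FAILED"))
```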
@@ -204,7 +213,10 @@ def generate_solution(
So the system messages and initial message should each include at least
{source_code} and {esbmc_output} so that they are substituted into the
message."""
message.
Queries the AI model to get a solution. Accepts an override scenario
parameter, in which case the scenario won't be resolved automatically."""

assert (
self.source_code_raw is not None
@@ -249,9 +261,10 @@
self.esbmc_output
)

assert (
line
), "fix code command: error line could not be found to apply brutal patch replacement"
assert line, (
"fix code command: error line could not be found to apply "
"brutal patch replacement"
)
solution = SourceFile.apply_line_patch(
self.source_code_raw, solution, line, line
)
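For the `single` source-code format, the brutal patch replaces just the offending line in the original source. A guess at `apply_line_patch`'s contract based on this call site, with `line, line` as an inclusive one-line range; the 0-based indexing here is an assumption:

```python
# Hypothetical re-implementation for illustration: replace lines
# [start, end] of the original source with the generated patch.
def apply_line_patch(source: str, patch: str, start: int, end: int) -> str:
    lines = source.splitlines()
    lines[start : end + 1] = patch.splitlines()
    return "\n".join(lines)

code = "int main() {\n  int x = 1 / 0;\n  return 0;\n}"
print(apply_line_patch(code, "  int x = 1;", 1, 1))
```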
10 changes: 8 additions & 2 deletions esbmc_ai/chats/user_chat.py
@@ -1,5 +1,7 @@
# Author: Yiannis Charalambous 2023

"""Contains class that handles the UserChat of ESBMC-AI"""

from typing_extensions import override

from langchain.memory import ConversationSummaryMemory
@@ -15,6 +17,9 @@


class UserChat(BaseChatInterface):
"""Simple interface that talks to the LLM and stores the result. The class
also stores the fixed results from fix code command."""

solution: str = ""

def __init__(
@@ -55,8 +60,9 @@ def set_solution(self, source_code: str) -> None:

@override
def compress_message_stack(self) -> None:
"""Uses ConversationSummaryMemory from Langchain to summarize the conversation of all the non-protected
messages into one summary message which is added into the conversation as a SystemMessage.
"""Uses ConversationSummaryMemory from Langchain to summarize the
conversation of all the non-protected messages into one summary message
which is added into the conversation as a SystemMessage.
"""

memory: ConversationSummaryMemory = ConversationSummaryMemory.from_messages(
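`compress_message_stack` delegates the summarization to LangChain. A hedged sketch of that step, assuming the classic `langchain.memory` API shown in the diff, where `from_messages` builds the summary and exposes it via `memory.buffer`:

```python
# Sketch: summarize the non-protected messages into a single
# SystemMessage used to re-seed the conversation. Requires langchain;
# llm is any chat model instance you already have.
from langchain.memory import ChatMessageHistory, ConversationSummaryMemory
from langchain_core.messages import BaseMessage, SystemMessage

def summarize_history(llm, messages: list[BaseMessage]) -> SystemMessage:
    memory = ConversationSummaryMemory.from_messages(
        llm=llm,
        chat_memory=ChatMessageHistory(messages=messages),
    )
    return SystemMessage(content=memory.buffer)
```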
2 changes: 2 additions & 0 deletions esbmc_ai/commands/__init__.py
@@ -3,6 +3,8 @@
"""This module contains built-in commands that can be executed by ESBMC-AI."""

from .fix_code_command import FixCodeCommand
from .help_command import HelpCommand

__all__ = [
"ChatCommand",
"ExitCommand",
Expand Down
6 changes: 4 additions & 2 deletions esbmc_ai/commands/fix_code_command.py
@@ -1,7 +1,7 @@
# Author: Yiannis Charalambous

import sys
from typing import Any, Optional, Tuple
from typing import Any, Optional
from typing_extensions import override

from esbmc_ai.ai_models import AIModel
@@ -42,6 +42,8 @@ def __str__(self) -> str:


class FixCodeCommand(ChatCommand):
"""Command for automatically fixing code using a verifier."""

on_solution_signal: Signal = Signal()

def __init__(self) -> None:
@@ -71,7 +73,7 @@ def print_raw_conversation() -> None:
)

message_history: str = (
kwargs["message_history"] if "message_history" else "normal"
kwargs["message_history"] if "message_history" in kwargs else "normal"
)

api_keys: APIKeyCollection = kwargs["api_keys"]
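The one-line `message_history` change fixes a real bug: a non-empty string literal is always truthy, so the old expression unconditionally indexed `kwargs` and could never fall back to "normal". A minimal demonstration:

```python
kwargs: dict = {}  # caller supplied no message_history

try:
    value = kwargs["message_history"] if "message_history" else "normal"
except KeyError:
    print("old expression: KeyError - the literal is always truthy")

value = kwargs["message_history"] if "message_history" in kwargs else "normal"
print("fixed expression:", value)  # -> normal
```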