From ea70ff0f951470b4bb52e30f5facc6e2222cffa2 Mon Sep 17 00:00:00 2001
From: Julio Nunes Avelar
Date: Wed, 20 Nov 2024 16:12:30 -0300
Subject: [PATCH] Configurando pylint e formatando arquivos

---
 .github/workflows/pylint.yml  |   6 +-
 config_generator.py           | 297 ++++++++++++++++++++++++----------
 core/config.py                |  66 +++++---
 core/file_manager.py          | 150 ++++++++++++-----
 core/fpga.py                  | 245 ++++++++++++++++++++--------
 core/graph.py                 | 148 +++++++++++++----
 core/jenkins.py               | 124 ++++++++++----
 core/ollama.py                | 213 ++++++++++++++++++++----
 main.py                       |  96 ++++++++---
 utils/clean.py                |  16 +-
 utils/generate_config_file.py |  59 ++++---
 utils/generate_dot.py         |  18 +--
 utils/graph.py                |  87 +++++-----
 utils/ollama_test.py          | 145 +++++++++--------
 utils/plot.py                 |  70 ++++----
 utils/run_all.py              |  45 +++++-
 16 files changed, 1274 insertions(+), 511 deletions(-)

diff --git a/.github/workflows/pylint.yml b/.github/workflows/pylint.yml
index c73e032..ad1adf8 100644
--- a/.github/workflows/pylint.yml
+++ b/.github/workflows/pylint.yml
@@ -7,7 +7,7 @@ jobs:
     runs-on: ubuntu-latest
     strategy:
       matrix:
-        python-version: ["3.8", "3.9", "3.10"]
+        python-version: ["3.10", "3.11", "3.12"]
     steps:
     - uses: actions/checkout@v4
     - name: Set up Python ${{ matrix.python-version }}
@@ -18,6 +18,8 @@ jobs:
       run: |
         python -m pip install --upgrade pip
         pip install pylint
+        pip install -r requirements.txt
     - name: Analysing the code with pylint
       run: |
-        pylint $(git ls-files '*.py')
+        pylint $(git ls-files '*.py' ':!utils/*') --max-locals=30 \
+          --disable=duplicate-code,import-error
diff --git a/config_generator.py b/config_generator.py
index 1af360f..84dbac0 100644
--- a/config_generator.py
+++ b/config_generator.py
@@ -1,3 +1,71 @@
+"""
+This script automates the generation of processor configurations and Jenkinsfiles for FPGA projects.
+
+It includes the following functionality:
+- Cloning processor repositories and analyzing their files.
+- Extracting hardware modules and testbench files from the repository.
+- Building module dependency graphs.
+- Generating configuration files for the processor.
+- Generating Jenkinsfiles for CI/CD pipelines targeting multiple FPGAs.
+- Optionally adding generated configurations to a central configuration file or plotting module
+  graphs.
+
+Modules and Functions:
+----------------------
+- **get_top_module_file**: Retrieves the file path of a specific top module.
+- **copy_hardware_template**: Copies a hardware template file, naming it after the repository.
+- **generate_processor_config**: Clones a repository, analyzes it, and generates a configuration
+  for the processor.
+- **generate_all_pipelines**: Generates Jenkinsfiles for all processors defined in the configuration
+  file.
+- **main**: Parses command-line arguments and triggers the appropriate operations.
+
+Command-Line Interface:
+-----------------------
+- `-j`, `--generate-all-jenkinsfiles`: Generates Jenkinsfiles for processors.
+- `-c`, `--generate-config`: Generates a configuration for a specified processor.
+- `-g`, `--plot-graph`: Plots the module dependency graph for the generated configuration.
+- `-a`, `--add-config`: Adds the generated configuration to the central config file.
+- `-p`, `--path-config`: Specifies the path to the config file (default: `config.json`).
+- `-u`, `--processor-url`: Specifies the URL of the processor repository to clone.
+
+Constants:
+----------
+- **EXTENSIONS**: Supported file extensions (`['v', 'sv', 'vhdl', 'vhd']`).
+- **BASE_DIR**: Base directory for storing Jenkinsfiles.
+- **FPGAs**: List of supported FPGAs for Jenkinsfile generation. +- **DESTINATION_DIR**: Temporary directory for processing repositories. +- **MAIN_SCRIPT_PATH**: Path to the main synthesis script used in Jenkinsfiles. + +Usage: +------ +1. To generate a processor configuration: +```python +python script.py -c -u +``` + +2. To generate all Jenkinsfiles: +```python +python script.py -j +``` + +3. For help: +```python +python script.py --help +``` + +Dependencies: +------------- +- `os`, `time`, `json`, `shutil`, `argparse`: Standard Python libraries. +- **Custom Modules**: +- `core.config`: Handles loading and saving configuration files. +- `core.file_manager`: Provides utilities for cloning repositories, finding files, and + extracting modules. +- `core.graph`: Builds and visualizes module dependency graphs. +- `core.jenkins`: Generates Jenkinsfiles. +- `core.ollama`: Filters files and identifies top modules. +""" + import os import time import json @@ -17,35 +85,55 @@ from core.jenkins import generate_jenkinsfile -BASE_DIR = "jenkins_pipeline/" +EXTENSIONS = ['v', 'sv', 'vhdl', 'vhd'] +BASE_DIR = 'jenkins_pipeline/' FPGAs = [ - "colorlight_i9", - "digilent_nexys4_ddr", + 'colorlight_i9', + 'digilent_nexys4_ddr', # "gowin_tangnano_20k", # "xilinx_vc709", # "digilent_arty_a7_100t" ] -DESTINATION_DIR = "./temp" -MAIN_SCRIPT_PATH = "/eda/processor-ci/main.py" +DESTINATION_DIR = './temp' +MAIN_SCRIPT_PATH = '/eda/processor-ci/main.py' def get_top_module_file(modules: list[dict[str, str]], top_module: str) -> str: + """ + Retrieves the file path of the specified top module from a list of module dictionaries. + + Args: + modules (list[dict[str, str]]): A list of dictionaries where each dictionary + contains the module name and its file path. + top_module (str): The name of the top module to find. + + Returns: + str: The file path of the top module if found, or an empty string otherwise. + """ for module in modules: - if module["module"] == top_module: - return module["file"] + if module['module'] == top_module: + return module['file'] - return "" + return '' def copy_hardware_template(repo_name: str) -> None: - # Caminho do diretório de origem - orig = "rtl/template.v" + """ + Copies a hardware template file to a new destination, renaming it based on the repository name. + + Args: + repo_name (str): The name of the repository to use in the destination file name. + + Returns: + None + """ + orig = 'rtl/template.v' # Caminho do diretório de destino - dest = f"rtl/{repo_name}.v" + dest = f'rtl/{repo_name}.v' if os.path.exists(dest): - print("Arquivo já existe") + print('Arquivo já existe') return # Copiar o diretório @@ -55,16 +143,28 @@ def copy_hardware_template(repo_name: str) -> None: def generate_processor_config( url: str, add_config: bool, plot_graph: bool, config_file_path: str ) -> None: - repo_name = url.split("/")[-1].replace(".git", "") + """ + Generates a processor configuration by cloning a repository, analyzing its files, + extracting modules, and optionally updating the configuration file and plotting graphs. + + Args: + url (str): URL of the processor's repository to clone. + add_config (bool): Whether to add the generated configuration to the config file. + plot_graph (bool): Whether to plot the module dependency graphs. + config_file_path (str): Path to the configuration file. 
+ + Returns: + None + """ + repo_name = url.split('/')[-1].replace('.git', '') destination_path = clone_repo(url, repo_name) if not destination_path: - print("Não foi possível clonar o repositório.") + print('Não foi possível clonar o repositório.') return - extensions = ["v", "sv", "vhdl", "vhd"] - files, extension = find_files_with_extension(destination_path, extensions) + files, extension = find_files_with_extension(destination_path, EXTENSIONS) modules = extract_modules(files) @@ -72,8 +172,8 @@ def generate_processor_config( for module_name, file_path in modules: modulename_list.append( { - "module": module_name, - "file": os.path.relpath(file_path, destination_path), + 'module': module_name, + 'file': os.path.relpath(file_path, destination_path), } ) @@ -88,7 +188,8 @@ def generate_processor_config( tb_files = [os.path.relpath(tb_f, destination_path) for tb_f in tb_files] non_tb_files = [ - os.path.relpath(non_tb_f, destination_path) for non_tb_f in non_tb_files + os.path.relpath(non_tb_f, destination_path) + for non_tb_f in non_tb_files ] include_dirs = find_include_dirs(destination_path) @@ -103,40 +204,40 @@ def generate_processor_config( non_tb_files, tb_files, modules, module_graph, repo_name ) - language_version = "2005" + language_version = '2005' - if extension == ".vhdl": - language_version = "08" - elif extension == ".vhd": - language_version = "08" - elif extension == ".sv": - language_version = "2012" + if extension == '.vhdl': + language_version = '08' + elif extension == '.vhd': + language_version = '08' + elif extension == '.sv': + language_version = '2012' output_json = { - "name": repo_name, - "folder": repo_name, - "sim_files": tb_files, - "files": filtered_files, - "include_dirs": include_dirs, - "repository": url, - "top_module": top_module, - "extra_flags": [], - "language_version": language_version, + 'name': repo_name, + 'folder': repo_name, + 'sim_files': tb_files, + 'files': filtered_files, + 'include_dirs': include_dirs, + 'repository': url, + 'top_module': top_module, + 'extra_flags': [], + 'language_version': language_version, } - print("Result: ") + print('Result: ') print(json.dumps(output_json, indent=4)) - output_json["modules"] = modulename_list - output_json["module_graph"] = module_graph - output_json["module_graph_inverse"] = module_graph_inverse - output_json["non_tb_files"] = non_tb_files - - log_file = open(f"logs/{repo_name}_{time.time()}.json", "w") - - log_file.write(json.dumps(output_json, indent=4)) + output_json['modules'] = modulename_list + output_json['module_graph'] = module_graph + output_json['module_graph_inverse'] = module_graph_inverse + output_json['non_tb_files'] = non_tb_files - log_file.close() + with open( + f'logs/{repo_name}_{time.time()}.json', 'w', encoding='utf-8' + ) as log_file: + log_file.write(json.dumps(output_json, indent=4)) + log_file.close() copy_hardware_template(repo_name) # top_module_file = get_top_module_file(modulename_list, top_module) @@ -147,96 +248,128 @@ def generate_processor_config( if add_config: config = load_config(config_file_path) - config["cores"][repo_name] = output_json + config['cores'][repo_name] = output_json save_config(config_file_path, config) if plot_graph: # Plotar os grafos - plot_graph(module_graph, "Grafo Direto dos Módulos") - plot_graph(module_graph_inverse, "Grafo Inverso dos Módulos") + plot_graph(module_graph, 'Grafo Direto dos Módulos') + plot_graph(module_graph_inverse, 'Grafo Inverso dos Módulos') def generate_all_pipelines(config_file_path: str) -> None: + """ + Generates 
Jenkinsfiles for all processors defined in the configuration file. + + Args: + config_file_path (str): Path to the configuration file. + + Returns: + None + """ config = load_config(config_file_path) - for key in config["cores"].keys(): + for key in config['cores'].keys(): processor_data = get_processor_data(config, key) generate_jenkinsfile( processor_data, FPGAs, MAIN_SCRIPT_PATH, - processor_data["language_version"], - processor_data["extra_flags"], + processor_data['language_version'], + processor_data['extra_flags'], + ) + os.rename( + 'Jenkinsfile', f'{BASE_DIR}{processor_data["name"]}.Jenkinsfile' ) - os.rename("Jenkinsfile", f'{BASE_DIR}{processor_data["name"]}.Jenkinsfile') - print("Jenkinsfiles generated successfully.") + print('Jenkinsfiles generated successfully.') def main() -> None: + """ + Main entry point of the script. Parses command-line arguments and executes the + corresponding actions. + + Command-line arguments: + -j, --generate-all-jenkinsfiles: Generates Jenkinsfiles for the processors. + -c, --generate-config: Generates a processor configuration. + -g, --plot-graph: Plots the module dependency graph. + -a, --add-config: Adds the generated configuration to the config file. + -p, --path-config: Path to the config file (default: 'config.json'). + -u, --processor-url: URL of the processor repository. + + Raises: + ValueError: If `--generate-config` is used without providing `--processor-url`. + + Returns: + None + """ parser = argparse.ArgumentParser( - description="Script para gerar as configurações de um processador" + description='Script para gerar as configurações de um processador' ) parser = argparse.ArgumentParser( - description="Script to generate processor configurations" + description='Script to generate processor configurations' ) parser.add_argument( - "-j", - "--generate-all-jenkinsfiles", - action="store_true", - help="Generates a Jenkinsfiles for the processors", + '-j', + '--generate-all-jenkinsfiles', + action='store_true', + help='Generates a Jenkinsfiles for the processors', ) parser.add_argument( - "-c", - "--generate-config", - action="store_true", - help="Generates a processor configuration", + '-c', + '--generate-config', + action='store_true', + help='Generates a processor configuration', ) parser.add_argument( - "-g", - "--plot-graph", - action="store_true", - help="Plots the graph of the generated configuration", + '-g', + '--plot-graph', + action='store_true', + help='Plots the graph of the generated configuration', ) parser.add_argument( - "-a", - "--add-config", - action="store_true", - help="Adds the generated configuration to the config file", + '-a', + '--add-config', + action='store_true', + help='Adds the generated configuration to the config file', ) parser.add_argument( - "-p", - "--path-config", + '-p', + '--path-config', type=str, - default="config.json", - help="Path to the config file", + default='config.json', + help='Path to the config file', ) parser.add_argument( - "-u", - "--processor-url", + '-u', + '--processor-url', type=str, - help="URL of the processor repository", + help='URL of the processor repository', ) args = parser.parse_args() if args.generate_config: if not args.processor_url: - raise ValueError("Argumento processor-url não encontrado") + raise ValueError('Argumento processor-url não encontrado') generate_processor_config( - args.processor_url, args.add_config, args.plot_graph, args.path_config + args.processor_url, + args.add_config, + args.plot_graph, + args.path_config, ) if args.generate_all_jenkinsfiles: 
generate_all_pipelines(args.path_config) if not args.generate_config and not args.generate_all_jenkinsfiles: - print("Nenhum comando fornecido, utilize --help para listar as opcões") + print('Nenhum comando fornecido, utilize --help para listar as opcões') -if __name__ == "__main__": +if __name__ == '__main__': main() diff --git a/core/config.py b/core/config.py index c9bb0ea..05fa795 100644 --- a/core/config.py +++ b/core/config.py @@ -1,50 +1,80 @@ +""" +This module provides utilities for handling configuration files +in JSON format, including operations to load, save, and fetch specific data, +such as processor information. + +Main functions: +- load_config: Loads a JSON configuration file. +- save_config: Saves a dictionary to a JSON configuration file. +- get_processor_data: Retrieves processor information from the configuration file. +""" + import os import json def load_config(config_path: str) -> dict: - """ - Carrega o arquivo de configuração JSON e retorna seu conteúdo. + """Loads a JSON configuration file and returns its content. - :param config_path: Caminho para o arquivo de configuração JSON. - :return: Conteúdo do JSON como dicionário. + Args: + config_path (str): Path to the JSON configuration file. + + Returns: + dict: Content of the JSON file as a dictionary. + + Raises: + FileNotFoundError: If the specified configuration file does not exist. + json.JSONDecodeError: If the file is not a valid JSON. """ if not os.path.exists(config_path): raise FileNotFoundError( - f"O arquivo de configuração {config_path} não foi encontrado." + f'The configuration file {config_path} was not found.' ) - with open(config_path, "r") as file: + with open(config_path, 'r', encoding='utf-8') as file: config_data = json.load(file) return config_data def save_config(config_path: str, config_data: dict) -> None: - """ - Salva o dicionário de configuração no arquivo JSON especificado. + """Saves a dictionary to a specified JSON configuration file. - :param config_path: Caminho para o arquivo de configuração JSON. - :param config_data: Dicionário de dados de configuração a ser salvo. + Args: + config_path (str): Path to the JSON configuration file. + config_data (dict): Configuration data to be saved. + + Returns: + None + + Raises: + TypeError: If the data provided is not serializable to JSON. + IOError: If there is an issue writing to the file. """ - with open(config_path, "w") as file: + with open(config_path, 'w', encoding='utf-8') as file: json.dump(config_data, file, indent=4) def get_processor_data(config: dict, processor_name: str) -> dict: - """ - Busca os dados do processador pelo nome no arquivo de configuração. + """Fetches processor data by name from the configuration dictionary. + + Args: + config (dict): Loaded configuration dictionary. + processor_name (str): Name of the processor to retrieve. + + Returns: + dict: Dictionary containing processor data. - :param config: Dicionário de configuração carregado. - :param processor_name: Nome do processador que deseja buscar. - :return: Dicionário com os dados do processador, ou None se não encontrado. + Raises: + KeyError: If the 'cores' key is missing in the configuration. + ValueError: If the processor is not found in the configuration. """ - cores = config.get("cores", {}) + cores = config.get('cores', {}) processor_data = cores.get(processor_name) if not processor_data: raise ValueError( - f"Processador '{processor_name}' não encontrado na configuração." + f"Processor '{processor_name}' not found in the configuration." 
) return processor_data diff --git a/core/file_manager.py b/core/file_manager.py index 67ec0c2..251b858 100644 --- a/core/file_manager.py +++ b/core/file_manager.py @@ -1,68 +1,124 @@ +""" +This module provides utilities for handling Git repositories, +searching and analyzing files with specific extensions (Verilog, VHDL), +and identifying modules and entities in HDL designs. + +Main functions: +- clone_repo: Clones a GitHub repository. +- remove_repo: Removes a cloned repository. +- find_files_with_extension: Finds files with specific extensions. +- is_testbench_file: Checks if a file appears to be a testbench. +- find_include_dirs: Locates directories containing include files. +- extract_modules: Extracts modules and entities from HDL files. +""" + import subprocess import os import glob import re import shutil -# Constante com o diretório de destino -DESTINATION_DIR = "./temp" +# Constant for the destination directory +DESTINATION_DIR = './temp' + + +def clone_repo(url: str, repo_name: str) -> str: + """Clones a GitHub repository to a specified directory. + Args: + url (str): URL of the GitHub repository. + repo_name (str): Name of the repository (used as the directory name). -def clone_repo(url, repo_name): - """Clona um repositório do GitHub para um diretório especificado.""" + Returns: + str: Path to the cloned repository. + + Raises: + subprocess.CalledProcessError: If the cloning process fails. + """ destination_path = os.path.join(DESTINATION_DIR, repo_name) try: - # Clonar o repositório subprocess.run( - ["git", "clone", "--recursive", url, destination_path], check=True + ['git', 'clone', '--recursive', url, destination_path], check=True ) return destination_path except subprocess.CalledProcessError as e: - print(f"Erro ao clonar o repositório: {e}") + print(f'Error cloning the repository: {e}') return None -def remove_repo(repo_name): - destination_path = os.path.join(DESTINATION_DIR, repo_name) +def remove_repo(repo_name: str) -> None: + """Removes a cloned repository. + Args: + repo_name (str): Name of the repository to be removed. + + Returns: + None + """ + destination_path = os.path.join(DESTINATION_DIR, repo_name) shutil.rmtree(destination_path) -def find_files_with_extension(directory, extensions) -> tuple[list, str]: - """Encontra arquivos com extensões específicas em um diretório.""" - extension = ".v" +def find_files_with_extension( + directory: str, extensions: list[str] +) -> tuple[list[str], str]: + """Finds files with specific extensions in a directory. + + Args: + directory (str): Path to the directory to search. + extensions (list[str]): List of file extensions to search for. + + Returns: + tuple[list[str], str]: List of found files and the predominant file extension. + + Raises: + IndexError: If no files with the specified extensions are found. 
+ """ + extension = '.v' files = [] for extension in extensions: - files.extend(glob.glob(f"{directory}/**/*.{extension}", recursive=True)) + files.extend( + glob.glob(f'{directory}/**/*.{extension}', recursive=True) + ) - if ".sv" in files[0]: - extension = ".sv" - elif ".vhdl" in files[0]: - extension = ".vhdl" - elif ".vhd" in files[0]: - extension = ".vhd" - elif ".v" in files[0]: - extension = ".v" + if '.sv' in files[0]: + extension = '.sv' + elif '.vhdl' in files[0]: + extension = '.vhdl' + elif '.vhd' in files[0]: + extension = '.vhd' + elif '.v' in files[0]: + extension = '.v' return files, extension -def is_testbench_file(file_path, repo_name): - """Verifica se o arquivo parece ser um testbench baseado no nome ou na localização.""" - relative_path = os.path.relpath(file_path, os.path.join(DESTINATION_DIR, repo_name)) +def is_testbench_file(file_path: str, repo_name: str) -> bool: + """Checks if a file is likely to be a testbench based on its name or location. + + Args: + file_path (str): Path to the file. + repo_name (str): Name of the repository containing the file. + + Returns: + bool: True if the file is a testbench, otherwise False. + """ + relative_path = os.path.relpath( + file_path, os.path.join(DESTINATION_DIR, repo_name) + ) file_name = os.path.basename(relative_path) directory_parts = os.path.dirname(relative_path).split(os.sep) - # Verificando se o nome do arquivo contém palavras-chave - if re.search(r"(tb|testbench|test|verif)", file_name, re.IGNORECASE): + # Checking if the file name contains keywords + if re.search(r'(tb|testbench|test|verif)', file_name, re.IGNORECASE): return True - # Verificando se alguma parte do caminho contém palavras-chave + # Checking if any part of the path contains keywords for part in directory_parts: if re.search( - r"(tests?|testbenches?|testbenchs?|simulations?|tb|sim|verif)", + r'(tests?|testbenches?|testbenchs?|simulations?|tb|sim|verif)', part, re.IGNORECASE, ): @@ -71,27 +127,39 @@ def is_testbench_file(file_path, repo_name): return False -def find_include_dirs(directory): - """Encontra todos os diretórios que contêm arquivos de inclusão.""" - include_files = glob.glob(f"{directory}/**/*.(svh|vh)", recursive=True) - include_dirs = list(set([os.path.dirname(file) for file in include_files])) +def find_include_dirs(directory: str) -> set[str]: + """Finds directories containing include files (.svh or .vh). + + Args: + directory (str): Path to the directory to search. + + Returns: + set[str]: Set of directories containing include files. + """ + include_files = glob.glob(f'{directory}/**/*.(svh|vh)', recursive=True) + include_dirs = {os.path.dirname(file) for file in include_files} return include_dirs -def extract_modules(files): - """Extrai módulos e entidades de arquivos Verilog, SystemVerilog e VHDL.""" +def extract_modules(files: list[str]) -> list[tuple[str, str]]: + """Extracts modules and entities from HDL files. + + Args: + files (list[str]): List of HDL file paths. + + Returns: + list[tuple[str, str]]: List of tuples with module/entity names and their file paths. 
+ """ modules = [] - module_pattern_verilog = re.compile(r"module\s+(\w+)\s*") - entity_pattern_vhdl = re.compile(r"entity\s+(\w+)\s+is", re.IGNORECASE) + module_pattern_verilog = re.compile(r'module\s+(\w+)\s*') + entity_pattern_vhdl = re.compile(r'entity\s+(\w+)\s+is', re.IGNORECASE) for file_path in files: - with open( - file_path, "r", errors="ignore" - ) as f: # Ignorar erros de decodificação + with open(file_path, 'r', errors='ignore', encoding='utf-8') as f: content = f.read() - # Encontrar módulos Verilog/SystemVerilog + # Find Verilog/SystemVerilog modules verilog_matches = module_pattern_verilog.findall(content) modules.extend( [ @@ -100,7 +168,7 @@ def extract_modules(files): ] ) - # Encontrar entidades VHDL + # Find VHDL entities vhdl_matches = entity_pattern_vhdl.findall(content) modules.extend( [ diff --git a/core/fpga.py b/core/fpga.py index 7e93d2f..77f9d08 100644 --- a/core/fpga.py +++ b/core/fpga.py @@ -1,3 +1,48 @@ +""" +This module provides utility functions for generating build scripts, managing toolchain +configurations, and executing build and flash processes for various FPGA development boards. + +Main Features: +1. **Macro Management**: + - Dynamically retrieves macro definitions specific to the target board. + +2. **Prefix Command Determination**: + - Selects appropriate commands for handling Verilog and VHDL files based on the + board and file type. + +3. **Build Script Generation**: + - Creates a complete build script by combining base configuration templates and + specific file paths. + +4. **Build Process Execution**: + - Automates the FPGA build process using Makefiles and specified build scripts. + +5. **Flashing the FPGA**: + - Handles the process of flashing the generated bitstream to the target FPGA board. + +Functions: +- `get_macros(board: str) -> str`: Returns macro definitions for a specified board. +- `get_prefix(board: str, vhdl: bool) -> str`: Determines the appropriate prefix command + for file processing. +- `make_build_file(config: dict, board: str, toolchain_path: str) -> str`: Generates a + build script for the target board. +- `build(build_script_path: str, board: str, toolchain_path: str) -> None`: Executes + the build process using Makefiles. +- `flash(board: str, toolchain_path: str) -> None`: Flashes the generated bitstream + to the target board. + +Usage: +- Ensure that the toolchain path and configuration files are properly set up. +- Use `make_build_file` to generate a build script for the target FPGA. +- Execute `build` to compile the design and `flash` to program the FPGA. + +Dependencies: +- Python's standard `os` and `subprocess` modules are used for file operations and + command execution. +- The environment must have the necessary FPGA toolchain and Makefiles for the + specified boards. +""" + import os import subprocess @@ -6,136 +51,200 @@ def get_macros(board: str) -> str: - if board == "colorlight_i9": - return "-DID=0x6a6a6a6a -DCLOCK_FREQ=25000000 -DMEMORY_SIZE=4096" + """ + Retrieves the macro definitions based on the target board. + + Args: + board (str): The name of the board. + + Returns: + str: The macro definitions as a string. 
+ """ + if board == 'colorlight_i9': + return '-DID=0x6a6a6a6a -DCLOCK_FREQ=25000000 -DMEMORY_SIZE=4096' - if board == "digilent_nexys4_ddr": - return '-tclargs "ID=0x6a6a6a6a" "CLOCK_FREQ=50000000" "MEMORY_SIZE=4096"' + if board == 'digilent_nexys4_ddr': + return ( + '-tclargs "ID=0x6a6a6a6a" "CLOCK_FREQ=50000000" "MEMORY_SIZE=4096"' + ) - if board == "digilent_arty_a7_100t": - return '-tclargs "ID=0x6a6a6a6a" "CLOCK_FREQ=50000000" "MEMORY_SIZE=4096"' + if board == 'digilent_arty_a7_100t': + return ( + '-tclargs "ID=0x6a6a6a6a" "CLOCK_FREQ=50000000" "MEMORY_SIZE=4096"' + ) - if board == "xilinx_vc709": + if board == 'xilinx_vc709': return '-tclargs "ID=0x6a6a6a6a" "CLOCK_FREQ=100000000" "MEMORY_SIZE=4096"' return '-tclargs "ID=0x6a6a6a6a" "CLOCK_FREQ=50000000" "MEMORY_SIZE=4096"' def get_prefix(board: str, vhdl: bool) -> str: - if board == "gowin_tangnano_20k": - return "add_file" + """ + Determines the file prefix command based on the target board and file type. + + Args: + board (str): The name of the board. + vhdl (bool): Whether the file is a VHDL file. + + Returns: + str: The prefix command to use. + """ + if board == 'gowin_tangnano_20k': + return 'add_file' if vhdl: - if board == "colorlight_i9": - return "yosys ghdl -a" - return "read_vhdl" + if board == 'colorlight_i9': + return 'yosys ghdl -a' + return 'read_vhdl' - if board == "colorlight_i9": - return "yosys read_verilog" + if board == 'colorlight_i9': + return 'yosys read_verilog' - return "read_verilog" + return 'read_verilog' def make_build_file(config: dict, board: str, toolchain_path: str) -> str: - - if toolchain_path[-1] == "/": + """ + Generates a build script for the specified board and configuration. + + Args: + config (dict): Configuration dictionary with file details. + board (str): The name of the board. + toolchain_path (str): Path to the toolchain directory. + + Returns: + str: The path to the generated build script. + + Raises: + FileNotFoundError: If the base configuration file does not exist. + ValueError: If the base configuration file cannot be read. + """ + if toolchain_path[-1] == '/': toolchain_path = toolchain_path[:-1] - base_config_path = f"{toolchain_path}/processor-ci/build_scripts/{board}.tcl" + base_config_path = ( + f'{toolchain_path}/processor-ci/build_scripts/{board}.tcl' + ) if not os.path.exists(base_config_path): raise FileNotFoundError( - f"O arquivo de configuração {base_config_path} não foi encontrado." + f'The configuration file {base_config_path} was not found.' ) base_config = None - with open(base_config_path, "r") as file: + with open(base_config_path, 'r', encoding='utf-8') as file: base_config = file.read() if not base_config: raise ValueError( - f"Não foi possível ler o arquivo de configação {base_config_path}." + f'Unable to read the configuration file {base_config_path}.' 
) - final_config_path = CURRENT_DIR + f"/build_{board}.tcl" + final_config_path = CURRENT_DIR + f'/build_{board}.tcl' - with open(final_config_path, "w") as file: + with open(final_config_path, 'w', encoding='utf-8') as file: prefix = get_prefix(board, False) file.write( - prefix + f' {toolchain_path}/processor-ci/rtl/{config["folder"]}.v\n' + prefix + + f' {toolchain_path}/processor-ci/rtl/{config["folder"]}.v\n' ) - for i in config["files"]: - prefix = get_prefix(board, i.endswith(".vhd")) - file.write(prefix + f" {CURRENT_DIR}/" + i + "\n") + for i in config['files']: + prefix = get_prefix(board, i.endswith('.vhd')) + file.write(prefix + f' {CURRENT_DIR}/' + i + '\n') file.write(base_config) - print(f"Arquivo de configuração final gerado em {final_config_path}") + print(f'Final configuration file generated at {final_config_path}') return final_config_path def build(build_script_path: str, board: str, toolchain_path: str) -> None: - if toolchain_path[-1] == "/": + """ + Executes the build process using the specified build script and makefile. + + Args: + build_script_path (str): Path to the build script. + board (str): The name of the board. + toolchain_path (str): Path to the toolchain directory. + + Returns: + None + + Raises: + subprocess.CalledProcessError: If the build process fails. + """ + if toolchain_path[-1] == '/': toolchain_path = toolchain_path[:-1] - makefile_path = f"{toolchain_path}/processor-ci/makefiles/{board}.mk" + makefile_path = f'{toolchain_path}/processor-ci/makefiles/{board}.mk' macros = get_macros(board) - # Define a variável BUILD_SCRIPT antes do comando make - process = subprocess.Popen( + # Set the BUILD_SCRIPT variable before running the make command + with subprocess.Popen( [ - "make", - "-f", + 'make', + '-f', makefile_path, - f"BUILD_SCRIPT={build_script_path}", - f"MACROS={macros}", + f'BUILD_SCRIPT={build_script_path}', + f'MACROS={macros}', ], stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True, - ) - - # Captura a saída e os erros - stdout, stderr = process.communicate() + ) as process: + # Capture the output and errors + stdout, stderr = process.communicate() - # Verifica o status da execução - if process.returncode == 0: - print("Makefile executado com sucesso.") - print("Saída do Makefile:") - print(stdout) - else: - print("Erro ao executar o Makefile.") - print(stderr) - raise subprocess.CalledProcessError(process.returncode, "make") + # Check the status of the execution + if process.returncode == 0: + print('Makefile executed successfully.') + print('Makefile output:') + print(stdout) + else: + print('Error executing Makefile.') + print(stderr) + raise subprocess.CalledProcessError(process.returncode, 'make') def flash(board: str, toolchain_path: str) -> None: - if toolchain_path[-1] == "/": + """ + Flashes the generated bitstream to the target board. + + Args: + board (str): The name of the board. + toolchain_path (str): Path to the toolchain directory. + + Returns: + None + + Raises: + subprocess.CalledProcessError: If the flashing process fails. 
+ """ + if toolchain_path[-1] == '/': toolchain_path = toolchain_path[:-1] - makefile_path = f"{toolchain_path}/processor-ci/makefiles/{board}.mk" + makefile_path = f'{toolchain_path}/processor-ci/makefiles/{board}.mk' - process = subprocess.Popen( - ["make", "-f", makefile_path, "load"], + with subprocess.Popen( + ['make', '-f', makefile_path, 'load'], stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True, - ) - - # Captura a saída e os erros - - stdout, stderr = process.communicate() - - # Verifica o status da execução - if process.returncode == 0: - print("Makefile executado com sucesso.") - print("Saída do Makefile:") - print(stdout) - else: - print("Erro ao executar o Makefile.") - print(stderr) - raise subprocess.CalledProcessError(process.returncode, "make") + ) as process: + # Capture the output and errors + stdout, stderr = process.communicate() + + # Check the status of the execution + if process.returncode == 0: + print('Makefile executed successfully.') + print('Makefile output:') + print(stdout) + else: + print('Error executing Makefile.') + print(stderr) + raise subprocess.CalledProcessError(process.returncode, 'make') diff --git a/core/graph.py b/core/graph.py index e817b26..7396270 100644 --- a/core/graph.py +++ b/core/graph.py @@ -1,26 +1,90 @@ +""" +This module provides functions for analyzing Verilog, SystemVerilog, and VHDL code to +identify module instances and build dependency graphs. It uses regular expressions to +find instances of modules within files and constructs direct and inverse +dependency graphs. The graphs are then visualized using the NetworkX and Matplotlib libraries. + +Functions: + +1. `find_module_instances(content: str, module_list: list) -> list` + Searches for instances of specified modules in the content of Verilog, SystemVerilog, + or VHDL files. + + Args: + content (str): The content of the file to search for module instances. + module_list (list): A list of module names to check for instances. + + Returns: + list: A list of module instances found in the content that match any of the modules + in `module_list`. + +2. `build_module_graph(files: list, modules: list[dict]) -> tuple[list, list]` + Builds a dependency graph between modules by analyzing the instances found in the given files. + + Args: + files (list): A list of file paths to search for module instances. + modules (list): A list of dictionaries containing module names and their respective + file paths. + + Returns: + tuple: A tuple containing two dictionaries: + - `module_graph`: A dictionary where each module name maps to a list of modules it + instantiates. + - `module_graph_inverse`: A dictionary where each module name maps to a list of modules + that instantiate it. + +3. `plot_graph(module_graph: dict, inverse: bool = False) -> None` + Plots the module dependency graph using the NetworkX and Matplotlib libraries. + + Args: + module_graph (dict): A dictionary representing the module dependency graph. + inverse (bool, optional): If True, plots the inverse graph (instantiator -> instantiated). + Defaults to False. + + Returns: + None: The graph is displayed using Matplotlib. + +This module is useful for analyzing and visualizing the relationships between modules in hardware +description language (HDL) code such as Verilog, SystemVerilog, and VHDL. 
+""" + + import re import networkx as nx import matplotlib.pyplot as plt def find_module_instances(content: str, module_list: list) -> list: - """Encontra instâncias de módulos em um arquivo Verilog, SystemVerilog ou VHDL.""" + """ + Finds instances of modules in a Verilog, SystemVerilog, or VHDL file. + + Args: + content (str): The content of the file to search for module instances. + module_list (list): A list of module names to check for instances. + + Returns: + list: A list of module instances found in the content that match any of + the modules in `module_list`. + """ instances = [] - # Padrão para instâncias em Verilog/SystemVerilog: modulo inst_name(...) ou modulo #(...) inst_name(...) + # Pattern for Verilog/SystemVerilog instances: module inst_name(...) or + # module #( ... ) inst_name(...) verilog_instance_pattern = re.compile( - r"(\w+)\s*(?:#\s*\(.*?\)\s*)?\w+\s*\(.*?\)\s*;", re.DOTALL + r'(\w+)\s*(?:#\s*\(.*?\)\s*)?\w+\s*\(.*?\)\s*;', re.DOTALL ) - # Padrão para instâncias em VHDL: component module_name is - vhdl_instance_pattern = re.compile(r"component\s+(\w+)\s+is", re.IGNORECASE) + # Pattern for VHDL instances: component module_name is + vhdl_instance_pattern = re.compile( + r'component\s+(\w+)\s+is', re.IGNORECASE + ) - # Procurar instâncias em Verilog/SystemVerilog + # Search for instances in Verilog/SystemVerilog for match in verilog_instance_pattern.findall(content): if match in module_list: instances.append(match) - # Procurar instâncias em VHDL + # Search for instances in VHDL for match in vhdl_instance_pattern.findall(content): if match in module_list: instances.append(match) @@ -29,34 +93,49 @@ def find_module_instances(content: str, module_list: list) -> list: def build_module_graph(files: list, modules: list[dict]) -> tuple[list, list]: - """Constrói um grafo de dependência entre os módulos.""" + """ + Builds a dependency graph between modules based on the files and module list. + + Args: + files (list): A list of file paths to search for module instances. + modules (list): A list of dictionaries containing module names and their + respective file paths. + + Returns: + tuple: A tuple containing two dictionaries: + - module_graph: A dictionary where each module name maps to a list of + modules it instantiates. + - module_graph_inverse: A dictionary where each module name maps to a + list of modules that instantiate it. 
+ """ module_graph = {} module_graph_inverse = {} module_names = [module[0] for module in modules] - # Inicializar o grafo direto e inverso com cada módulo + # Initialize the direct and inverse graphs with each module for module_name, _ in modules: module_graph[module_name] = [] module_graph_inverse[module_name] = [] for file_path in files: with open( - file_path, "r", errors="ignore" - ) as f: # Ignorar erros de decodificação + file_path, 'r', errors='ignore', encoding='utf-8' + ) as f: # Ignore decoding errors content = f.read() - # Encontrar o nome do módulo atual (módulo onde as instâncias estão sendo feitas) - current_module_match = re.search(r"module\s+(\w+)", content) + # Find the current module name (module where instances are being made) + current_module_match = re.search(r'module\s+(\w+)', content) if not current_module_match: - continue # Ignorar arquivos sem um módulo Verilog + continue # Skip files without a Verilog module current_module_name = current_module_match.group(1) - # Encontrar as instâncias dentro deste módulo + # Find instances within this module module_instances = find_module_instances(content, module_names) - # Atualizar o grafo direto (instanciado -> instanciador) e inverso (instanciador -> instanciado) + # Update the direct (instantiated -> instantiator) and inverse + # (instantiator -> instantiated) graphs for instance in module_instances: if instance in module_graph: module_graph[instance].append(current_module_name) @@ -66,34 +145,49 @@ def build_module_graph(files: list, modules: list[dict]) -> tuple[list, list]: def plot_graph(module_graph: dict, inverse: bool = False) -> None: - G = nx.DiGraph() + """ + Plots the module dependency graph using NetworkX and Matplotlib. + + Args: + module_graph (dict): A dictionary representing the module dependency graph. + inverse (bool, optional): If True, plots the inverse graph + (instantiator -> instantiated). Defaults to False. + + Returns: + None: The graph is displayed using Matplotlib. + """ + graph = nx.DiGraph() for node, edges in module_graph.items(): for edge in edges: if inverse: - G.add_edge(edge, node) + graph.add_edge(edge, node) else: - G.add_edge(node, edge) + graph.add_edge(node, edge) plt.figure(figsize=(10, 8)) - # Escolher o layout - pos = nx.spring_layout(G) # Tente também circular_layout, shell_layout, etc. + # Choose the layout + pos = nx.spring_layout( + graph + ) # You can also try circular_layout, shell_layout, etc. - # Desenhar o grafo + # Draw the graph nx.draw( - G, + graph, pos, with_labels=True, node_size=3000, - node_color="lightblue", + node_color='lightblue', font_size=10, - font_weight="bold", - edge_color="gray", + font_weight='bold', + edge_color='gray', arrows=True, ) plt.title( - "Module Dependency Graph (Inverse)" if inverse else "Module Dependency Graph" + 'Module Dependency Graph (Inverse)' + if inverse + else 'Module Dependency Graph' ) plt.show() diff --git a/core/jenkins.py b/core/jenkins.py index 77d2e0d..2961ced 100644 --- a/core/jenkins.py +++ b/core/jenkins.py @@ -1,3 +1,34 @@ +""" +This module contains the function `generate_jenkinsfile`, which is responsible for +generating a Jenkins pipeline configuration (Jenkinsfile) for FPGA simulation and build +pipelines. The function creates a Jenkins pipeline that includes stages for cloning the +repository, running simulations, and building FPGA designs in parallel. Additionally, it +provides support for different hardware description languages (VHDL and Verilog) and can +include pre-processing steps for Verilog file conversion. 
+ +Key steps in the pipeline include: +- Cloning the repository. +- Running a simulation for the provided FPGA files (VHDL or Verilog). +- Parallel execution of FPGA build stages, including synthesis, flashing, and testing. + +The generated Jenkinsfile can be used to automate FPGA design verification and deployment +processes using Jenkins. + +Functions: + generate_jenkinsfile(config: dict, fpgas: list, main_script_path: str, + lang_version: str, extra_flags: list = None) -> None: + Generates a Jenkinsfile based on the provided configuration and FPGA details. + +Arguments: + config (dict): A dictionary containing project and FPGA configuration. + fpgas (list): A list of FPGA names to be included in the build pipeline. + main_script_path (str): The path to the main Python script used for synthesis and flashing. + lang_version (str): The version of the hardware description language + to be used (e.g., VHDL or Verilog). extra_flags (list, optional): + Additional flags for the simulation command. +""" + + def generate_jenkinsfile( config: dict, fpgas: list, @@ -5,7 +36,19 @@ def generate_jenkinsfile( lang_version: str, extra_flags: list = None, ) -> None: + """ + Generates a Jenkinsfile for FPGA build and simulation pipelines. + Args: + config (dict): Configuration dictionary containing project and FPGA details. + fpgas (list): List of FPGA names to be used in the pipeline. + main_script_path (str): Path to the main Python script for synthesis and flashing. + lang_version (str): The version of the VHDL or Verilog language to use. + extra_flags (list, optional): List of extra flags for the simulation command. + + Returns: + None + """ jenkinsfile = """ pipeline {{ agent any @@ -42,73 +85,90 @@ def generate_jenkinsfile( """ # Prepare file lists - files = " ".join(config.get("files", [])) - sim_files = " ".join(config.get("sim_files", [])) - include_dirs = " ".join(f"-I {inc}" for inc in config.get("include_dirs", [])) + files = ' '.join(config.get('files', [])) + sim_files = ' '.join(config.get('sim_files', [])) + include_dirs = ' '.join( + f'-I {inc}' for inc in config.get('include_dirs', []) + ) # Define extra flags if provided - extra_flags_str = " ".join(extra_flags) if extra_flags else "" + extra_flags_str = ' '.join(extra_flags) if extra_flags else '' # Determine simulation command based on file types is_vhdl = any( - file.endswith(".vhdl") or file.endswith(".vhd") - for file in config.get("files", []) + file.endswith('.vhdl') or file.endswith('.vhd') + for file in config.get('files', []) ) is_verilog = any( - file.endswith(".v") or file.endswith(".sv") for file in config.get("files", []) + file.endswith('.v') or file.endswith('.sv') + for file in config.get('files', []) ) if is_vhdl and not is_verilog: # VHDL simulation command - simulation_command = f'sh "ghdl -a --std={lang_version} {extra_flags_str} {include_dirs} {files} {sim_files}"' + simulation_command = f'sh "ghdl -a --std={lang_version} \ + {extra_flags_str} {include_dirs} {files} {sim_files}"' elif is_verilog and not is_vhdl: # Verilog simulation command - simulation_command = f'sh "iverilog -o simulation.out -g{lang_version} {extra_flags_str} -s {config["top_module"]} {include_dirs} {files} {sim_files}"' + simulation_command = f'sh "iverilog -o simulation.out -g{lang_version} {extra_flags_str}' \ + + f' -s {config["top_module"]} {include_dirs} {files} {sim_files}"' else: - raise ValueError("Os arquivos precisam ser exclusivamente VHDL ou Verilog.") + raise ValueError( + 'The files must be either exclusively VHDL or Verilog.' 
+ ) # Prepare FPGA stages for each FPGA in parallel - fpga_parallel_stages = "\n ".join( + fpga_parallel_stages = '\n '.join( [ - f""" + """ stage('{fpga}') {{ options {{ lock(resource: '{fpga}') }} stages {{ - stage('Síntese e PnR') {{ + stage('Synthesis and PnR') {{ steps {{ - dir("{config['folder']}") {{ - echo 'Iniciando síntese para FPGA {fpga}.' - sh 'python3 {main_script_path} -c /eda/processor-ci/config.json -p {config["folder"]} -b {fpga}' + dir("{folder}") {{ + echo 'Starting synthesis for FPGA {fpga}.' + sh 'python3 {main_script_path} -c /eda/processor-ci/config.json \\ + -p {folder} -b {fpga}' }} }} }} stage('Flash {fpga}') {{ steps {{ - dir("{config['folder']}") {{ - echo 'FPGA {fpga} bloqueada para flash.' - sh 'python3 {main_script_path} -c /eda/processor-ci/config.json -p {config["folder"]} -b {fpga} -l' + dir("{folder}") {{ + echo 'Flashing FPGA {fpga}.' + sh 'python3 {main_script_path} -c /eda/processor-ci/config.json \\ + -p {folder} -b {fpga} -l' }} }} }} - stage('Teste {fpga}') {{ + stage('Test {fpga}') {{ steps {{ - echo 'Testando FPGA {fpga}.' - dir("{config['folder']}") {{ - sh 'PYTHONPATH=/eda/processor-ci-communication PORT={"/dev/ttyACM0" if fpga == "colorlight_i9" else "/dev/ttyUSB1"} python /eda/processor-ci-communication/run_tests.py' + echo 'Testing FPGA {fpga}.' + dir("{folder}") {{ + sh 'PYTHONPATH=/eda/processor-ci-communication PORT="{port}" \\ + python /eda/processor-ci-communication/run_tests.py' }} }} }} }} - }}""" + }}""".format( + fpga=fpga, + folder=config['folder'], + main_script_path=main_script_path, + port='/dev/ttyACM0' + if fpga == 'colorlight_i9' + else '/dev/ttyUSB1', + ) for fpga in fpgas ] ) - pre_script = "" + pre_script = '' - if "pre_script" in config.keys(): + if 'pre_script' in config.keys(): pre_script = f""" stage('Verilog Convert') {{ steps {{ @@ -121,10 +181,10 @@ def generate_jenkinsfile( # Generate Jenkinsfile content jenkinsfile = jenkinsfile.format( - repository=config["repository"], - folder=config["folder"], + repository=config['repository'], + folder=config['folder'], pre_script=pre_script, - top_module=config["top_module"], + top_module=config['top_module'], include_dirs=include_dirs, files=files, sim_files=sim_files, @@ -132,8 +192,8 @@ def generate_jenkinsfile( fpga_parallel_stages=fpga_parallel_stages, ) - # Save the Jenkinsfile - with open("Jenkinsfile", "w") as f: + # Save the Jenkinsfile with specified encoding + with open('Jenkinsfile', 'w', encoding='utf-8') as f: f.write(jenkinsfile) - print("Jenkinsfile generated successfully.") + print('Jenkinsfile generated successfully.') diff --git a/core/ollama.py b/core/ollama.py index c976726..9627aab 100644 --- a/core/ollama.py +++ b/core/ollama.py @@ -1,54 +1,151 @@ +""" +This script contains utilities for interacting with a language model server to perform operations +on processor-related hardware description language (HDL) files. It provides functions for sending +prompts, parsing responses, and generating outputs relevant to processor verification and design. + +Features: +- **Server Communication**: Interact with the specified language model server to process prompts. +- **File Filtering**: Identify and filter files relevant to processor functionality. +- **Top Module Detection**: Extract the processor's top module for further use in synthesis or + simulation. +- **Verilog File Generation**: Automatically generate Verilog files to connect the processor with + verification infrastructures. 
+ +Modules: +- **`send_prompt`**: Sends a prompt to the language model and returns the response. +- **`parse_filtered_files`**: Parses text to extract a list of filtered HDL files. +- **`remove_top_module`**: Extracts the name of the top module from the model's response. +- **`get_filtered_files_list`**: Filters processor-relevant files using model analysis. +- **`get_top_module`**: Identifies the processor's top module based on file data and dependencies. +- **`generate_top_file`**: Creates a Verilog file for processor and verification infrastructure + integration. + +Dependencies: +- `ollama`: A client library for interacting with the language model. +- Standard Python libraries: `os`, `re`, and `time`. + +Configuration: +- `SERVER_URL`: Specifies the server's URL for the language model. + +Usage: +1. Adjust the `SERVER_URL` to point to the correct language model server. +2. Use the provided functions to filter files, identify the top module, and generate necessary + Verilog files. +3. Outputs can be used in HDL simulations, synthesis, and verification. + +Note: +- Ensure the server is running and accessible. +- All file paths and directory structures must match the expected inputs for successful operations. +""" + import os import re import time from ollama import Client -SERVER_URL = "http://enqii.lsc.ic.unicamp.br:11434" +SERVER_URL = 'http://enqii.lsc.ic.unicamp.br:11434' client = Client(host=SERVER_URL) -def send_prompt( - prompt: str, model: str = "qwen2.5:32b" -) -> tuple[bool, str]: # "qwen2.5:32b" +def send_prompt(prompt: str, model: str = 'qwen2.5:32b') -> tuple[bool, str]: + """ + Sends a prompt to the specified server and receives the model's response. + + Args: + prompt (str): The prompt to be sent to the model. + model (str, optional): The model to use. Default is 'qwen2.5:32b'. + Returns: + tuple: A tuple containing a boolean value (indicating success) + and the model's response as a string. + """ response = client.generate(prompt=prompt, model=model) - if not response or not "response" in response: - return 0, "" + if not response or 'response' not in response: + return 0, '' - return 1, response["response"] + return 1, response['response'] def parse_filtered_files(text: str) -> list: - # Expressão regular para capturar a lista dentro de colchetes - match = re.search(r"filtered_files:\s*\[\s*(.*?)\s*\]", text, re.DOTALL) + """ + Parses a text to extract a list of filtered files. - if match: - # Extrai o conteúdo dos colchetes - file_list_str = match.group(1) + Uses a regular expression to locate and capture a list of files present + in a string formatted as `filtered_files: []`. + Cleans up spaces and unnecessary characters before returning the results. - # Remove espaços em excesso, quebras de linha, e divide a string por vírgulas - file_list = [file.strip().strip("'") for file in file_list_str.split(",")] + Args: + text (str): The text to be parsed to find the filtered file list. + + Returns: + list: A list containing the names of filtered files. + Returns an empty list if no files are found. + """ + match = re.search(r'filtered_files:\s*\[\s*(.*?)\s*\]', text, re.DOTALL) + if match: + file_list_str = match.group(1) + file_list = [ + file.strip().strip("'") for file in file_list_str.split(',') + ] return file_list return [] def remove_top_module(text: str) -> str: - # Expressão regular para encontrar a linha com o formato top_module: - match = re.search(r"top_module:\s*(\S+)", text) + """ + Extracts the name of the top module from a given text. 
+ + Uses a regular expression to locate and capture a line following the format + `top_module: `. If found, it returns the module name. + + Args: + text (str): The text to be parsed to find the top module. + + Returns: + str: The name of the top module extracted from the text. + Returns an empty string if no top module is found. + """ + match = re.search(r'top_module:\s*(\S+)', text) if match: - # Extrai o módulo encontrado top_module = match.group(1) return top_module - return "" + return '' -def get_filtered_files_list(files, sim_files, modules, tree, repo_name): +def get_filtered_files_list( + files: list[str], + sim_files: list[str], + modules: list[str], + tree, + repo_name: str, +) -> list[str]: + """ + Generates a list of files relevant to a processor based on the provided data. + + This function uses a language model to analyze lists of files, modules, + dependency trees, and repository data, filtering out irrelevant files such as + those related to peripherals, memories, or debugging. It returns only the files + directly related to the processor. + + Args: + files (list): List of available files. + sim_files (list): List of simulation and test-related files. + modules (list): List of modules present in the processor. + tree (list): Dependency structure of the modules. + repo_name (str): Name of the project repository. + + Returns: + list: A list containing the names of the files relevant to the processor. + + Raises: + NameError: If an error occurs during the language model query. + """ prompt = f""" Processors are generally divided into one or more modules; for example, I can have a module for the ALU, one for the register bank, etc. The files below are the hardware description language files for a processor and its peripherals. @@ -75,14 +172,40 @@ def get_filtered_files_list(files, sim_files, modules, tree, repo_name): ok, response = send_prompt(prompt) if not ok: - raise NameError("Erro ao consultar modelo") + raise NameError('Erro ao consultar modelo') print(response) return parse_filtered_files(response) -def get_top_module(files, sim_files, modules, tree, repo_name): +def get_top_module( + files: list[str], + sim_files: list[str], + modules: list[str], + tree, + repo_name: str, +) -> str: + """ + Identifies the processor's top module within a set of files. + + Uses a language model to analyze files, modules, dependency trees, + and repository data to determine the processor's top module, ignoring + other elements such as SoCs or peripherals. + + Args: + files (list): List of available files. + sim_files (list): List of simulation and test-related files. + modules (list): List of modules present in the processor. + tree (list): Dependency structure of the modules. + repo_name (str): Name of the project repository. + + Returns: + str: The name of the processor's top module. + + Raises: + NameError: If an error occurs during the language model query. + """ prompt = f""" Processors are generally divided into one or more modules; for example, I can have a module for the ALU, one for the register bank, etc. The files below are the hardware description language files for a processor and its peripherals. 
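Both parsers above are regex-based, so they assume the model answers in a fixed plain-text layout (`filtered_files: [...]` and `top_module: <name>`). As a rough illustration of that contract (not part of this patch; the reply below is a hand-written stand-in for a real model response), the expected round trip looks like this:

```python
from core.ollama import parse_filtered_files, remove_top_module

# Hypothetical model reply, written to match the formats the regexes expect.
FAKE_RESPONSE = """
Analysis of the repository:
filtered_files: ['alu.v', 'regfile.v', 'core.v']
top_module: core
"""

print(parse_filtered_files(FAKE_RESPONSE))  # ['alu.v', 'regfile.v', 'core.v']
print(remove_top_module(FAKE_RESPONSE))     # 'core'
```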
@@ -101,7 +224,7 @@ def get_top_module(files, sim_files, modules, tree, repo_name): ok, response = send_prompt(prompt) if not ok: - raise NameError("Erro ao consultar modelo") + raise NameError('Erro ao consultar modelo') # print(response) @@ -109,16 +232,36 @@ def get_top_module(files, sim_files, modules, tree, repo_name): def generate_top_file(top_module_file: str, processor_name: str) -> None: - template_file = open("rtl/template.v", "r") - top_module_file = open(f"temp/{processor_name}/{top_module_file}", "r") - example_file = open("rtl/Risco-5.v", "r") + """ + Generates a Verilog file connecting a processor to a verification infrastructure. + + This function creates a Verilog module based on a template, the processor's + top module file, and a provided example. It establishes the necessary connections + between the processor and the verification infrastructure. + + Args: + top_module_file (str): Path to the file containing the processor's top module. + processor_name (str): Name of the processor. + + Returns: + None: The result is saved in a Verilog file. + + Raises: + NameError: If an error occurs during the language model query. + """ + with open('rtl/template.v', 'r', encoding='utf-8') as template_file: + template = template_file.read() + + with open( + f'temp/{processor_name}/{top_module_file}', 'r', encoding='utf-8' + ) as top_module_file_: + top_module_content = top_module_file_.read() - template = template_file.read() - top_module_content = top_module_file.read() - example = example_file.read() + with open('rtl/Risco-5.v', 'r', encoding='utf-8') as example_file: + example = example_file.read() template_file.close() - top_module_file.close() + top_module_file_.close() example_file.close() prompt = f""" @@ -144,11 +287,11 @@ def generate_top_file(top_module_file: str, processor_name: str) -> None: ok, response = send_prompt(prompt) if not ok: - raise NameError("Erro ao consultar modelo") + raise NameError('Erro ao consultar modelo') - if os.path.exists(f"rtl/{processor_name}.v"): - processor_name = f"{processor_name}_{time.time()}" + if os.path.exists(f'rtl/{processor_name}.v'): + processor_name = f'{processor_name}_{time.time()}' - final_file = open(f"rtl/{processor_name}.v", "w") - final_file.write(response) - final_file.close() + with open(f'rtl/{processor_name}.v', 'w', encoding='utf-8') as final_file: + final_file.write(response) + final_file.close() diff --git a/main.py b/main.py index 0113714..2df9527 100644 --- a/main.py +++ b/main.py @@ -1,3 +1,39 @@ +""" +This script is designed to manage the configuration, build, and optional loading of FPGA bitstreams +based on specified processor and board configurations. It integrates functionality for handling +toolchains, generating build files, and flashing bitstreams onto the FPGA. + +Modules and Features: +- **Configuration Loading**: Reads a JSON configuration file to extract processor data. +- **Build File Generation**: Creates a build file for the specified processor and board. +- **Build and Flash**: Supports building and flashing bitstreams onto the FPGA. + +Functions: +- **`main`**: Executes the primary flow, including loading configuration, generating the build file, + and optionally flashing the FPGA. + +Command-Line Interface: +- The script supports command-line arguments to customize behavior. Use `-h` or `--help` to + display the CLI usage. + +Arguments: +- **`-c`/`--config`** (required): Path to the configuration file. +- **`-p`/`--processor`** (required): Name of the processor to use. 
+- **`-b`/`--board`** (required): Name of the board to target. +- **`-t`/`--toolchain`** (optional): Path to the toolchains (default: `/eda`). +- **`-l`/`--load`** (optional): Load the bitstream onto the FPGA after building. + +Usage Example: +```bash +python script.py -c config.json -p processor_name -b board_name -t /path/to/toolchain -l +``` +Requirements: + +- The configuration file should be in JSON format and contain details about the + processors and boards. +- Ensure the toolchain path and board are correctly set up in the environment. +""" + import argparse from core.config import get_processor_data, load_config from core.fpga import make_build_file, flash, build @@ -10,6 +46,24 @@ def main( toolchain_path: str, load: bool = False, ) -> None: + """Main function to handle FPGA design setup, build, and optional flashing. + Args: + config_path (str): Path to the configuration file in JSON format. + processor_name (str): Name of the processor to use. + board_name (str): Name of the target FPGA board. + toolchain_path (str): Path to the toolchain directory. + load (bool, optional): If `True`, flash the generated bitstream to the FPGA. + Defaults to `False`. + + Steps: + 1. Loads the configuration file to extract processor and board data. + 2. Generates a build file for the specified processor and board. + 3. Builds the design and optionally flashes the FPGA. + + Raises: + FileNotFoundError: If the configuration file cannot be found. + KeyError: If the processor or board data is missing in the configuration file. + """ # Carrega o arquivo de configuração config = load_config(config_path) @@ -18,7 +72,9 @@ def main( # Exibe os argumentos recebidos e os dados do processador - build_file_path = make_build_file(processor_data, board_name, toolchain_path) + build_file_path = make_build_file( + processor_data, board_name, toolchain_path + ) if load: flash(board_name, toolchain_path) @@ -26,50 +82,50 @@ def main( build(build_file_path, board_name, toolchain_path) -if __name__ == "__main__": +if __name__ == '__main__': parser = argparse.ArgumentParser( - description="Script para configurar o design com base no processador e placa." + description='Script para configurar o design com base no processador e placa.' 
) # Definição dos argumentos obrigatórios parser.add_argument( - "-c", - "--config", + '-c', + '--config', type=str, required=True, - help="Caminho do arquivo de configuração do script.", + help='Caminho do arquivo de configuração do script.', ) parser.add_argument( - "-p", - "--processor", + '-p', + '--processor', type=str, required=True, - help="Nome do processador a ser utilizado.", + help='Nome do processador a ser utilizado.', ) parser.add_argument( - "-b", - "--board", + '-b', + '--board', type=str, required=True, - help="Nome da placa a ser utilizada.", + help='Nome da placa a ser utilizada.', ) # Parâmetro opcional para o caminho das toolchains parser.add_argument( - "-t", - "--toolchain", + '-t', + '--toolchain', type=str, - default="/eda", + default='/eda', required=False, - help="Caminho para as toolchains (padrão: /eda).", + help='Caminho para as toolchains (padrão: /eda).', ) # Parâmetro opcional para carregar o bitstream parser.add_argument( - "-l", - "--load", - action="store_true", - help="Carregar o bitstream na FPGA.", + '-l', + '--load', + action='store_true', + help='Carregar o bitstream na FPGA.', ) # Parse dos argumentos diff --git a/utils/clean.py b/utils/clean.py index 3bed01f..b4a10da 100644 --- a/utils/clean.py +++ b/utils/clean.py @@ -1,20 +1,20 @@ import json # Carregar o JSON do arquivo config.json -with open("config.json", "r") as file: +with open('config.json', 'r') as file: data = json.load(file) # Remover as chaves indesejadas -for core in data["cores"].values(): - core.pop("modules", None) - core.pop("module_graph", None) - core.pop("module_graph_inverse", None) - core.pop("non_tb_files", None) +for core in data['cores'].values(): + core.pop('modules', None) + core.pop('module_graph', None) + core.pop('module_graph_inverse', None) + core.pop('non_tb_files', None) # Salvar o resultado em config2.json -with open("config2.json", "w") as file: +with open('config2.json', 'w') as file: json.dump(data, file, indent=4) print( - "As chaves indesejadas foram removidas e o novo arquivo foi salvo como config2.json." + 'As chaves indesejadas foram removidas e o novo arquivo foi salvo como config2.json.' 
) diff --git a/utils/generate_config_file.py b/utils/generate_config_file.py index cba5a7c..d8b83cc 100644 --- a/utils/generate_config_file.py +++ b/utils/generate_config_file.py @@ -5,7 +5,7 @@ import re # Constante com o diretório de destino -DESTINATION_DIR = "/home/julio/eda/teste_search_script" +DESTINATION_DIR = '/home/julio/eda/teste_search_script' def clone_repo(url, repo_name): @@ -14,10 +14,10 @@ def clone_repo(url, repo_name): try: # Clonar o repositório - subprocess.run(["git", "clone", url, destination_path], check=True) + subprocess.run(['git', 'clone', url, destination_path], check=True) return destination_path except subprocess.CalledProcessError as e: - print(f"Erro ao clonar o repositório: {e}") + print(f'Erro ao clonar o repositório: {e}') return None @@ -25,25 +25,31 @@ def find_files_with_extension(directory, extensions): """Encontra arquivos com extensões específicas em um diretório.""" files = [] for extension in extensions: - files.extend(glob.glob(f"{directory}/**/*.{extension}", recursive=True)) + files.extend( + glob.glob(f'{directory}/**/*.{extension}', recursive=True) + ) return files def is_testbench_file(file_path, repo_name): """Verifica se o arquivo parece ser um testbench baseado no nome ou na localização.""" - relative_path = os.path.relpath(file_path, os.path.join(DESTINATION_DIR, repo_name)) + relative_path = os.path.relpath( + file_path, os.path.join(DESTINATION_DIR, repo_name) + ) file_name = os.path.basename(relative_path) directory_parts = os.path.dirname(relative_path).split(os.sep) # Verificando se o nome do arquivo contém palavras-chave - if re.search(r"(tb|testbench|test)", file_name, re.IGNORECASE): + if re.search(r'(tb|testbench|test)', file_name, re.IGNORECASE): return True # Verificando se alguma parte do caminho contém palavras-chave for part in directory_parts: if re.search( - r"(tests?|testbenches|testbenchs?|simulations?|tb|sim)", part, re.IGNORECASE + r'(tests?|testbenches|testbenchs?|simulations?|tb|sim)', + part, + re.IGNORECASE, ): return True @@ -52,7 +58,7 @@ def is_testbench_file(file_path, repo_name): def find_include_dirs(directory): """Encontra todos os diretórios que contêm arquivos de inclusão.""" - include_files = glob.glob(f"{directory}/**/*.(svh|vh)", recursive=True) + include_files = glob.glob(f'{directory}/**/*.(svh|vh)', recursive=True) include_dirs = list(set([os.path.dirname(file) for file in include_files])) return include_dirs @@ -61,12 +67,12 @@ def extract_modules(files): """Extrai módulos e entidades de arquivos Verilog, SystemVerilog e VHDL.""" modules = [] - module_pattern_verilog = re.compile(r"module\s+(\w+)\s*") - entity_pattern_vhdl = re.compile(r"entity\s+(\w+)\s+is", re.IGNORECASE) + module_pattern_verilog = re.compile(r'module\s+(\w+)\s*') + entity_pattern_vhdl = re.compile(r'entity\s+(\w+)\s+is', re.IGNORECASE) for file_path in files: with open( - file_path, "r", errors="ignore" + file_path, 'r', errors='ignore' ) as f: # Ignorar erros de decodificação content = f.read() @@ -93,15 +99,15 @@ def extract_modules(files): def main(url): # Obter o nome do repositório a partir da URL - repo_name = url.split("/")[-1].replace(".git", "") + repo_name = url.split('/')[-1].replace('.git', '') destination_path = clone_repo(url, repo_name) if not destination_path: - print("Não foi possível clonar o repositório.") + print('Não foi possível clonar o repositório.') return - extensions = ["v", "sv", "vhdl", "vhd"] + extensions = ['v', 'sv', 'vhdl', 'vhd'] files = find_files_with_extension(destination_path, extensions) 
modules = extract_modules(files) @@ -110,8 +116,8 @@ def main(url): for module_name, file_path in modules: modulename_list.append( { - "module": module_name, - "file": os.path.relpath(file_path, destination_path), + 'module': module_name, + 'file': os.path.relpath(file_path, destination_path), } ) @@ -123,23 +129,26 @@ def main(url): # Montar o JSON de saída output_json = { - "name": repo_name, - "folder": repo_name, - "sim_files": [os.path.relpath(tb_f, destination_path) for tb_f in tb_files], - "design_files": [ - os.path.relpath(non_tb_f, destination_path) for non_tb_f in non_tb_files + 'name': repo_name, + 'folder': repo_name, + 'sim_files': [ + os.path.relpath(tb_f, destination_path) for tb_f in tb_files + ], + 'design_files': [ + os.path.relpath(non_tb_f, destination_path) + for non_tb_f in non_tb_files ], - "include_dirs": include_dirs, - "modules": modulename_list, + 'include_dirs': include_dirs, + 'modules': modulename_list, } print(json.dumps(output_json, indent=4)) def run(): - url = input("Insira a URL do repositório: ") + url = input('Insira a URL do repositório: ') main(url) -if __name__ == "__main__": +if __name__ == '__main__': run() diff --git a/utils/generate_dot.py b/utils/generate_dot.py index 5c8ff11..d23dc27 100644 --- a/utils/generate_dot.py +++ b/utils/generate_dot.py @@ -3,14 +3,14 @@ def generate_dot(graph, graph_name): """Gera o conteúdo do arquivo .dot a partir do grafo fornecido.""" - dot_content = f"digraph {graph_name} {{\n" + dot_content = f'digraph {graph_name} {{\n' for key, values in graph.items(): for value in values: - if key != "module": # Remover o módulo 'module' - dot_content += f" {key} -> {value};\n" + if key != 'module': # Remover o módulo 'module' + dot_content += f' {key} -> {value};\n' - dot_content += "}\n" + dot_content += '}\n' return dot_content @@ -19,22 +19,22 @@ def main(json_input): data = json.loads(json_input) # Gerar conteúdo para o arquivo dot do module_graph - module_graph_dot = generate_dot(data["module_graph"], "module_graph") + module_graph_dot = generate_dot(data['module_graph'], 'module_graph') # Gerar conteúdo para o arquivo dot do module_graph_inverse module_graph_inverse_dot = generate_dot( - data["module_graph_inverse"], "module_graph_inverse" + data['module_graph_inverse'], 'module_graph_inverse' ) # Salvar os arquivos .dot - with open("module_graph.dot", "w") as f: + with open('module_graph.dot', 'w') as f: f.write(module_graph_dot) - with open("module_graph_inverse.dot", "w") as f: + with open('module_graph_inverse.dot', 'w') as f: f.write(module_graph_inverse_dot) -if __name__ == "__main__": +if __name__ == '__main__': # Exemplo de JSON (substitua pelo seu JSON) json_data = """ { diff --git a/utils/graph.py b/utils/graph.py index 47150c9..4cd663c 100644 --- a/utils/graph.py +++ b/utils/graph.py @@ -8,7 +8,7 @@ import matplotlib.pyplot as plt # Constante com o diretório de destino -DESTINATION_DIR = "./temp" +DESTINATION_DIR = './temp' def clone_repo(url, repo_name): @@ -17,10 +17,10 @@ def clone_repo(url, repo_name): try: # Clonar o repositório - subprocess.run(["git", "clone", url, destination_path], check=True) + subprocess.run(['git', 'clone', url, destination_path], check=True) return destination_path except subprocess.CalledProcessError as e: - print(f"Erro ao clonar o repositório: {e}") + print(f'Erro ao clonar o repositório: {e}') return None @@ -34,25 +34,31 @@ def find_files_with_extension(directory, extensions): """Encontra arquivos com extensões específicas em um diretório.""" files = [] for extension in 
extensions: - files.extend(glob.glob(f"{directory}/**/*.{extension}", recursive=True)) + files.extend( + glob.glob(f'{directory}/**/*.{extension}', recursive=True) + ) return files def is_testbench_file(file_path, repo_name): """Verifica se o arquivo parece ser um testbench baseado no nome ou na localização.""" - relative_path = os.path.relpath(file_path, os.path.join(DESTINATION_DIR, repo_name)) + relative_path = os.path.relpath( + file_path, os.path.join(DESTINATION_DIR, repo_name) + ) file_name = os.path.basename(relative_path) directory_parts = os.path.dirname(relative_path).split(os.sep) # Verificando se o nome do arquivo contém palavras-chave - if re.search(r"(tb|testbench|test)", file_name, re.IGNORECASE): + if re.search(r'(tb|testbench|test)', file_name, re.IGNORECASE): return True # Verificando se alguma parte do caminho contém palavras-chave for part in directory_parts: if re.search( - r"(tests?|testbenches|testbenchs?|simulations?|tb|sim)", part, re.IGNORECASE + r'(tests?|testbenches|testbenchs?|simulations?|tb|sim)', + part, + re.IGNORECASE, ): return True @@ -61,7 +67,7 @@ def is_testbench_file(file_path, repo_name): def find_include_dirs(directory): """Encontra todos os diretórios que contêm arquivos de inclusão.""" - include_files = glob.glob(f"{directory}/**/*.(svh|vh)", recursive=True) + include_files = glob.glob(f'{directory}/**/*.(svh|vh)', recursive=True) include_dirs = list(set([os.path.dirname(file) for file in include_files])) return include_dirs @@ -70,12 +76,12 @@ def extract_modules(files): """Extrai módulos e entidades de arquivos Verilog, SystemVerilog e VHDL.""" modules = [] - module_pattern_verilog = re.compile(r"module\s+(\w+)\s*") - entity_pattern_vhdl = re.compile(r"entity\s+(\w+)\s+is", re.IGNORECASE) + module_pattern_verilog = re.compile(r'module\s+(\w+)\s*') + entity_pattern_vhdl = re.compile(r'entity\s+(\w+)\s+is', re.IGNORECASE) for file_path in files: with open( - file_path, "r", errors="ignore" + file_path, 'r', errors='ignore' ) as f: # Ignorar erros de decodificação content = f.read() @@ -106,11 +112,13 @@ def find_module_instances(content, module_list): # Padrão para instâncias em Verilog/SystemVerilog: modulo inst_name(...) ou modulo #(...) inst_name(...) verilog_instance_pattern = re.compile( - r"(\w+)\s*(?:#\s*\(.*?\)\s*)?\w+\s*\(.*?\)\s*;", re.DOTALL + r'(\w+)\s*(?:#\s*\(.*?\)\s*)?\w+\s*\(.*?\)\s*;', re.DOTALL ) # Padrão para instâncias em VHDL: component module_name is - vhdl_instance_pattern = re.compile(r"component\s+(\w+)\s+is", re.IGNORECASE) + vhdl_instance_pattern = re.compile( + r'component\s+(\w+)\s+is', re.IGNORECASE + ) # Procurar instâncias em Verilog/SystemVerilog for match in verilog_instance_pattern.findall(content): @@ -139,12 +147,12 @@ def build_module_graph(files, modules): for file_path in files: with open( - file_path, "r", errors="ignore" + file_path, 'r', errors='ignore' ) as f: # Ignorar erros de decodificação content = f.read() # Encontrar o nome do módulo atual (módulo onde as instâncias estão sendo feitas) - current_module_match = re.search(r"module\s+(\w+)", content) + current_module_match = re.search(r'module\s+(\w+)', content) if not current_module_match: continue # Ignorar arquivos sem um módulo Verilog @@ -175,7 +183,9 @@ def plot_graph(module_graph, inverse=False): plt.figure(figsize=(10, 8)) # Escolher o layout - pos = nx.spring_layout(G) # Tente também circular_layout, shell_layout, etc. + pos = nx.spring_layout( + G + ) # Tente também circular_layout, shell_layout, etc. 
# Desenhar o grafo nx.draw( @@ -183,30 +193,32 @@ def plot_graph(module_graph, inverse=False): pos, with_labels=True, node_size=3000, - node_color="lightblue", + node_color='lightblue', font_size=10, - font_weight="bold", - edge_color="gray", + font_weight='bold', + edge_color='gray', arrows=True, ) plt.title( - "Module Dependency Graph (Inverse)" if inverse else "Module Dependency Graph" + 'Module Dependency Graph (Inverse)' + if inverse + else 'Module Dependency Graph' ) plt.show() def main(url): # Obter o nome do repositório a partir da URL - repo_name = url.split("/")[-1].replace(".git", "") + repo_name = url.split('/')[-1].replace('.git', '') destination_path = clone_repo(url, repo_name) if not destination_path: - print("Não foi possível clonar o repositório.") + print('Não foi possível clonar o repositório.') return - extensions = ["v", "sv", "vhdl", "vhd"] + extensions = ['v', 'sv', 'vhdl', 'vhd'] files = find_files_with_extension(destination_path, extensions) modules = extract_modules(files) @@ -215,8 +227,8 @@ def main(url): for module_name, file_path in modules: modulename_list.append( { - "module": module_name, - "file": os.path.relpath(file_path, destination_path), + 'module': module_name, + 'file': os.path.relpath(file_path, destination_path), } ) @@ -231,16 +243,19 @@ def main(url): # Montar o JSON de saída output_json = { - "name": repo_name, - "folder": repo_name, - "sim_files": [os.path.relpath(tb_f, destination_path) for tb_f in tb_files], - "design_files": [ - os.path.relpath(non_tb_f, destination_path) for non_tb_f in non_tb_files + 'name': repo_name, + 'folder': repo_name, + 'sim_files': [ + os.path.relpath(tb_f, destination_path) for tb_f in tb_files + ], + 'design_files': [ + os.path.relpath(non_tb_f, destination_path) + for non_tb_f in non_tb_files ], - "include_dirs": include_dirs, - "repository": url, - "top_module": "", - "extra_flags": [], + 'include_dirs': include_dirs, + 'repository': url, + 'top_module': '', + 'extra_flags': [], # "modules": modulename_list, # "module_graph": module_graph, # "module_graph_inverse": module_graph_inverse @@ -256,9 +271,9 @@ def main(url): def run(): - url = input("Insira a URL do repositório: ") + url = input('Insira a URL do repositório: ') main(url) -if __name__ == "__main__": +if __name__ == '__main__': run() diff --git a/utils/ollama_test.py b/utils/ollama_test.py index c0471ac..67e25a9 100644 --- a/utils/ollama_test.py +++ b/utils/ollama_test.py @@ -10,13 +10,13 @@ # Step 1: Clone the GitHub Repository def clone_repository(repo_url, local_dir): if os.path.exists(local_dir): - print(f"Directory {local_dir} already exists. Deleting and recloning.") + print(f'Directory {local_dir} already exists. Deleting and recloning.') shutil.rmtree(local_dir) # Remove the directory if it already exists try: - subprocess.run(["git", "clone", repo_url, local_dir], check=True) - print(f"Cloned repository into {local_dir}") + subprocess.run(['git', 'clone', repo_url, local_dir], check=True) + print(f'Cloned repository into {local_dir}') except subprocess.CalledProcessError as e: - print(f"Failed to clone the repository: {e}") + print(f'Failed to clone the repository: {e}') sys.exit(1) @@ -27,36 +27,36 @@ def find_cpu_top_and_dependencies(repo_dir): os.path.join(dp, f) for dp, dn, filenames in os.walk(repo_dir) for f in filenames - if f.endswith(".v") + if f.endswith('.v') ] # Create the prompt for Ollama - prompt = f"The repository contains the following Verilog files: {verilog_files}. 
Identify the top-level CPU module (typically named top, core, cpu_top, cpu_csr, or main) (give me just one top module) and list all the necessary Verilog files required for compilation.\n" + prompt = f'The repository contains the following Verilog files: {verilog_files}. Identify the top-level CPU module (typically named top, core, cpu_top, cpu_csr, or main) (give me just one top module) and list all the necessary Verilog files required for compilation.\n' - prompt_2 = "Also organize the files like the following template:\n\ + prompt_2 = 'Also organize the files like the following template:\n\ Top-Level Module:\n\ top_module goes here\n\ - Modules required for compilation:\n" + Modules required for compilation:\n' - prompt_3 = "Also show the path from after the /ollama-tests/ directory to the file" + prompt_3 = 'Also show the path from after the /ollama-tests/ directory to the file' prompt = prompt + prompt_2 + prompt_3 # Request Ollama to analyze the Verilog files - response = client.generate(model="qwen2.5:32b", prompt=prompt) + response = client.generate(model='qwen2.5:32b', prompt=prompt) # Check if response contains expected content - if response and "response" in response: + if response and 'response' in response: print( - "Ollama Response:", response["response"] + 'Ollama Response:', response['response'] ) # Debugging print to ensure you see the response try: # Variables to hold the top module and core modules - top_level_module = "" + top_level_module = '' cpu_core_modules = [] # Split the response into lines - lines = response["response"].split("\n") + lines = response['response'].split('\n') # Initialize flags to detect sections top_module_section = False @@ -66,36 +66,38 @@ def find_cpu_top_and_dependencies(repo_dir): for line in lines: line = line.strip() # Clean up extra spaces - if "Top-Level Module:" in line: + if 'Top-Level Module:' in line: top_module_section = True cpu_core_section = False # Reset any other section continue # Move to the next line after the header - if "Modules required for compilation:" in line: + if 'Modules required for compilation:' in line: cpu_core_section = True top_module_section = False # Reset top module section continue # Move to the next line after the header # Capture the top-level module - if top_module_section and line.startswith("-"): - top_level_module = line.split("- ")[1].strip() + if top_module_section and line.startswith('-'): + top_level_module = line.split('- ')[1].strip() # Capture the CPU core modules - if cpu_core_section and line.startswith("-"): - cpu_core_modules.append(line.split("- ")[1].strip()) + if cpu_core_section and line.startswith('-'): + cpu_core_modules.append(line.split('- ')[1].strip()) # Check if we successfully extracted the top module and core modules if top_level_module and cpu_core_modules: return top_level_module, cpu_core_modules else: - print("Failed to find the top-level module or CPU core modules.") + print( + 'Failed to find the top-level module or CPU core modules.' 
+ ) sys.exit(1) except Exception as e: - print(f"Error parsing Ollama response: {e}") + print(f'Error parsing Ollama response: {e}') sys.exit(1) else: - print("No valid response from Ollama.") + print('No valid response from Ollama.') sys.exit(1) @@ -105,33 +107,33 @@ def find_cpu_top_and_dependencies(repo_dir): os.path.join(dp, f) for dp, dn, filenames in os.walk(repo_dir) for f in filenames - if f.endswith(".v") + if f.endswith('.v') ] # Create the prompt for Ollama - prompt = f"The repository contains the following Verilog files: {verilog_files}. Identify the top-level CPU module (typically named top, core, cpu_top, cpu_csr, or main) (give me just one top module) and list all the necessary Verilog files required for compilation.\n" - prompt += "Also organize the files like the following template:\n" - prompt += "Top-Level Module:\n top_module goes here\nModules required for compilation:\n" - prompt += "Also show the path from after the /ollama-tests/ directory to the file" + prompt = f'The repository contains the following Verilog files: {verilog_files}. Identify the top-level CPU module (typically named top, core, cpu_top, cpu_csr, or main) (give me just one top module) and list all the necessary Verilog files required for compilation.\n' + prompt += 'Also organize the files like the following template:\n' + prompt += 'Top-Level Module:\n top_module goes here\nModules required for compilation:\n' + prompt += 'Also show the path from after the /ollama-tests/ directory to the file' # Request Ollama to analyze the Verilog files - response = client.generate(model="qwen2.5:32b", prompt=prompt) + response = client.generate(model='qwen2.5:32b', prompt=prompt) # Check if response contains expected content - if response and "response" in response: + if response and 'response' in response: print( - "Ollama Response:", response["response"] + 'Ollama Response:', response['response'] ) # Debugging print to ensure you see the response try: # Variables to hold the top module and core modules - top_level_module = "" + top_level_module = '' core_modules = [] additional_modules = [] testbench_modules = [] # Split the response into lines - lines = response["response"].split("\n") + lines = response['response'].split('\n') # Initialize flags to detect sections top_module_section = False @@ -142,12 +144,12 @@ def find_cpu_top_and_dependencies(repo_dir): line = line.strip() # Clean up extra spaces # Identify sections based on the headings in the response - if "Top-Level Module:" in line: + if 'Top-Level Module:' in line: top_module_section = True core_module_section = False # Reset other sections continue # Move to the next line after the header - if "Modules required for compilation:" in line: + if 'Modules required for compilation:' in line: core_module_section = True top_module_section = False # Reset other sections continue @@ -164,14 +166,14 @@ def find_cpu_top_and_dependencies(repo_dir): if top_level_module and core_modules: return top_level_module, core_modules else: - print("Failed to find the top-level module or core modules.") + print('Failed to find the top-level module or core modules.') sys.exit(1) except Exception as e: - print(f"Error parsing Ollama response: {e}") + print(f'Error parsing Ollama response: {e}') sys.exit(1) else: - print("No valid response from Ollama.") + print('No valid response from Ollama.') sys.exit(1) @@ -185,11 +187,11 @@ def get_necessary_verilog_files_and_dirs(repo_dir): # Get unique directories for the necessary Verilog files necessary_dirs = {os.path.dirname(file) 
for file in necessary_files} - print("Necessary .v files for CPU compilation:") + print('Necessary .v files for CPU compilation:') for file in necessary_files: print(file) - print("\nNecessary directories to include with -I flag:") + print('\nNecessary directories to include with -I flag:') for directory in necessary_dirs: print(directory) @@ -202,43 +204,48 @@ def get_necessary_verilog_files_and_dirs(repo_dir): # Step 4: Ask Ollama which Verilog standard is required def determine_verilog_standard(verilog_files): - files_str = ", ".join(verilog_files) + files_str = ', '.join(verilog_files) - prompt = f"Based on the following Verilog files: {files_str}, determine which Verilog standard (2001, 2005, 2005-sv, 2008) is required for proper compilation." + prompt = f'Based on the following Verilog files: {files_str}, determine which Verilog standard (2001, 2005, 2005-sv, 2008) is required for proper compilation.' - response = client.generate(model="qwen2.5:32b", prompt=prompt) + response = client.generate(model='qwen2.5:32b', prompt=prompt) - if response and "response" in response: + if response and 'response' in response: print("Ollama's recommendation for Verilog standard:") - print(response["response"]) - return response["response"].strip().lower() + print(response['response']) + return response['response'].strip().lower() else: - print("Failed to determine the required Verilog standard.") + print('Failed to determine the required Verilog standard.') sys.exit(1) # Step 5: Check for extra flags needed for Icarus Verilog compilation using Ollama def check_extra_flags(verilog_files): - files_str = ", ".join(verilog_files) + files_str = ', '.join(verilog_files) # Use Ollama to suggest any extra flags - prompt = f"The following Verilog files: {files_str} are part of a CPU design. Can you suggest any extra Icarus Verilog flags that might be needed during compilation?" + prompt = f'The following Verilog files: {files_str} are part of a CPU design. Can you suggest any extra Icarus Verilog flags that might be needed during compilation?' 
- response = client.generate(model="qwen2.5:32b", prompt=prompt) - print(response["response"]) + response = client.generate(model='qwen2.5:32b', prompt=prompt) + print(response['response']) - if response and "response" in response: + if response and 'response' in response: print("Ollama's suggestion for extra Icarus Verilog flags:") - print(response["response"]) - return response["response"].strip().split() + print(response['response']) + return response['response'].strip().split() else: - print("Failed to determine extra Icarus Verilog flags.") + print('Failed to determine extra Icarus Verilog flags.') sys.exit(1) # Step 6: Write JSON file based on the specified template def write_json_output( - repo_url, top_module, necessary_files, language_version, include_dirs, extra_flags + repo_url, + top_module, + necessary_files, + language_version, + include_dirs, + extra_flags, ): output_data = ' \ "template": { \ @@ -254,32 +261,34 @@ def write_json_output( "enable": false \ }' - prompt = f"Based on the following template: {output_data} write a json file with the following parameters: {repo_url}, {necessary_files}, {language_version}, {top_module}, {include_dirs}, {extra_flags}" + prompt = f'Based on the following template: {output_data} write a json file with the following parameters: {repo_url}, {necessary_files}, {language_version}, {top_module}, {include_dirs}, {extra_flags}' - response = client.generate(model="qwen2.5:32b", prompt=prompt) + response = client.generate(model='qwen2.5:32b', prompt=prompt) print(response) # Write the JSON to a file - json_file_path = "output.json" - with open(json_file_path, "w") as json_file: + json_file_path = 'output.json' + with open(json_file_path, 'w') as json_file: json.dump(response, json_file, indent=4) - print(f"JSON output written to {json_file_path}") + print(f'JSON output written to {json_file_path}') # Example usage -if __name__ == "__main__": - client = Client(host="http://enqii.lsc.ic.unicamp.br:11434") +if __name__ == '__main__': + client = Client(host='http://enqii.lsc.ic.unicamp.br:11434') # Provide the GitHub URL for the CPU design - repo_url = input("Enter the GitHub repo URL for the Verilog CPU design: ") - local_dir = "./Documentos/ollama-testes/" + repo_url = input('Enter the GitHub repo URL for the Verilog CPU design: ') + local_dir = './Documentos/ollama-testes/' # Clone the repository clone_repository(repo_url, local_dir) # Get the list of necessary Verilog files and directories for the CPU - necessary_verilog_files, necessary_verilog_dirs, top_module = ( - get_necessary_verilog_files_and_dirs(local_dir) - ) + ( + necessary_verilog_files, + necessary_verilog_dirs, + top_module, + ) = get_necessary_verilog_files_and_dirs(local_dir) # Determine which Verilog standard is required verilog_standard = determine_verilog_standard(necessary_verilog_files) diff --git a/utils/plot.py b/utils/plot.py index f609c8a..b1dcaaa 100644 --- a/utils/plot.py +++ b/utils/plot.py @@ -2,35 +2,37 @@ # Dados fornecidos dados = { - "Risco_5": {"LUT4": 3152, "Frequency": 54.5}, - "DarkRISCV": {"LUT4": 2291, "Frequency": 56.8}, - "SERV": {"LUT4": 301, "Frequency": 121.7}, - "RISCV Steel": {"LUT4": 6040, "Frequency": 39.03}, - "Mriscv": {"LUT4": 2658, "Frequency": 66.84}, - "TinyRiscv": {"LUT4": 3964, "Frequency": 59.85}, - "riskow": {"LUT4": 1957, "Frequency": 63.45}, - "riscado-v": {"LUT4": 2246, "Frequency": 45.22}, + 'Risco_5': {'LUT4': 3152, 'Frequency': 54.5}, + 'DarkRISCV': {'LUT4': 2291, 'Frequency': 56.8}, + 'SERV': {'LUT4': 301, 'Frequency': 121.7}, + 
'RISCV Steel': {'LUT4': 6040, 'Frequency': 39.03}, + 'Mriscv': {'LUT4': 2658, 'Frequency': 66.84}, + 'TinyRiscv': {'LUT4': 3964, 'Frequency': 59.85}, + 'riskow': {'LUT4': 1957, 'Frequency': 63.45}, + 'riscado-v': {'LUT4': 2246, 'Frequency': 45.22}, } dados_xilinx = { - "Risco_5": {"LUT4": 2359, "Frequency": 70.8}, - "DarkRISCV": {"LUT4": 1189, "Frequency": 74.0}, - "SERV": {"LUT4": 125, "Frequency": 147.3}, - "RISCV Steel": {"LUT4": 2006, "Frequency": 50.7}, - "Mriscv": {"LUT4": 1766, "Frequency": 86.8}, - "TinyRiscv": {"LUT4": 2570, "Frequency": 77.8}, - "riskow": {"LUT4": 1399, "Frequency": 82.48}, - "riscado-v": {"LUT4": 990, "Frequency": 58.7}, + 'Risco_5': {'LUT4': 2359, 'Frequency': 70.8}, + 'DarkRISCV': {'LUT4': 1189, 'Frequency': 74.0}, + 'SERV': {'LUT4': 125, 'Frequency': 147.3}, + 'RISCV Steel': {'LUT4': 2006, 'Frequency': 50.7}, + 'Mriscv': {'LUT4': 1766, 'Frequency': 86.8}, + 'TinyRiscv': {'LUT4': 2570, 'Frequency': 77.8}, + 'riskow': {'LUT4': 1399, 'Frequency': 82.48}, + 'riscado-v': {'LUT4': 990, 'Frequency': 58.7}, } # Preparar dados para a tecnologia Lattice ECP45F (original) -lut4_values_lattice = [dados[key]["LUT4"] for key in dados] -frequencia_values_lattice = [dados[key]["Frequency"] for key in dados] +lut4_values_lattice = [dados[key]['LUT4'] for key in dados] +frequencia_values_lattice = [dados[key]['Frequency'] for key in dados] labels = list(dados.keys()) # Preparar dados para a tecnologia Xilinx XC7A100T (ajustes de área e frequência) -lut4_values_xilinx = [dados_xilinx[key]["LUT4"] for key in dados_xilinx] -frequencia_values_xilinx = [dados_xilinx[key]["Frequency"] for key in dados_xilinx] +lut4_values_xilinx = [dados_xilinx[key]['LUT4'] for key in dados_xilinx] +frequencia_values_xilinx = [ + dados_xilinx[key]['Frequency'] for key in dados_xilinx +] # Criar gráfico de pontos plt.figure(figsize=(10, 6)) @@ -39,8 +41,8 @@ plt.scatter( frequencia_values_lattice, lut4_values_lattice, - color="blue", - label="Lattice ECP45F", + color='blue', + label='Lattice ECP45F', s=150, ) @@ -48,8 +50,8 @@ plt.scatter( frequencia_values_xilinx, lut4_values_xilinx, - color="red", - label="Xilinx XC7A100T", + color='red', + label='Xilinx XC7A100T', s=150, ) @@ -60,8 +62,8 @@ lut4_values_lattice[i] + 100, label, fontsize=14, - ha="center", - va="bottom", + ha='center', + va='bottom', ) # Adicionar rótulos aos pontos para Xilinx com fonte maior e centralizado @@ -71,17 +73,19 @@ lut4_values_xilinx[i] + 100, label, fontsize=14, - ha="center", - va="bottom", + ha='center', + va='bottom', ) # Adicionar títulos e rótulos aos eixos com fonte aumentada -plt.title("Frequency vs. LUT for Lattice ECP45F and Xilinx XC7A100T", fontsize=16) -plt.xlabel("Frequency (MHz)", fontsize=18) -plt.ylabel("LUT", fontsize=18) +plt.title( + 'Frequency vs. 
LUT for Lattice ECP45F and Xilinx XC7A100T', fontsize=16
+)
+plt.xlabel('Frequency (MHz)', fontsize=18)
+plt.ylabel('LUT', fontsize=18)
 
 # Aumentar o tamanho da fonte dos ticks dos eixos
-plt.tick_params(axis="both", which="major", labelsize=16)
+plt.tick_params(axis='both', which='major', labelsize=16)
 
 # Adicionar uma grade
 plt.grid(True)
@@ -90,5 +94,5 @@
 plt.legend(fontsize=16)
 
 # Mostrar gráfico
-plt.savefig("plot.png", format="png")
+plt.savefig('plot.png', format='png')
 plt.show()
diff --git a/utils/run_all.py b/utils/run_all.py
index aa32d93..3c20dde 100644
--- a/utils/run_all.py
+++ b/utils/run_all.py
@@ -1,20 +1,51 @@
+"""
+This script reads a list of URLs from a file, runs a command for each URL through proxychains,
+and generates configurations via the `config_generator.py` script. For each URL, the command is
+executed with a time limit of 3 minutes (180 seconds).
+
+The script does the following:
+1. Reads the URLs from a file named 'arquivos.txt'.
+2. For each URL, runs a command using `proxychains` with the `config_generator.py` script.
+3. Each command is executed with a 180-second time limit.
+4. On execution errors or timeouts, the script prints appropriate messages.
+
+Arguments:
+    There are no command-line arguments. The input file `arquivos.txt` must be present
+    in the working directory.
+
+Exceptions:
+    - `subprocess.TimeoutExpired`: If the command does not finish within the time limit.
+    - `subprocess.CalledProcessError`: If an error occurs while running the command.
+
+Note:
+    The input file `arquivos.txt` must contain one URL per line.
+"""
+
 import subprocess
 
 # Caminho para o arquivo que contém a lista de URLs
-file_path = "arquivos.txt"
+FILE_PATH = 'arquivos.txt'
 
 # Abrir o arquivo e ler as URLs
-with open(file_path, "r") as file:
+with open(FILE_PATH, 'r', encoding='utf-8') as file:
     urls = file.readlines()
 
 # Remover qualquer espaço ou quebra de linha ao final de cada URL
 urls = [url.strip() for url in urls]
 
 # Comando base
-command_base = ["proxychains", "python", "config_generator.py", "-u", "", "-c", "-a"]
+command_base = [
+    'proxychains',
+    'python',
+    'config_generator.py',
+    '-u',
+    '',
+    '-c',
+    '-a',
+]
 
 # Timeout de 3 minutos (180 segundos)
-timeout_seconds = 180
+TIMEOUT_SECONDS = 180
 
 # Para cada URL na lista, executar o comando com timeout
 for url in urls:
@@ -24,10 +55,10 @@
 
     try:
         # Executar o comando com timeout
-        subprocess.run(command_base, timeout=timeout_seconds, check=True)
+        subprocess.run(command_base, timeout=TIMEOUT_SECONDS, check=True)
     except subprocess.TimeoutExpired:
         print(
-            f"Comando para {url} atingiu o tempo limite de {timeout_seconds} segundos."
+            f'Comando para {url} atingiu o tempo limite de {TIMEOUT_SECONDS} segundos.'
         )
     except subprocess.CalledProcessError as e:
-        print(f"Erro ao executar o comando para {url}: {e}")
+        print(f'Erro ao executar o comando para {url}: {e}')
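
For reference, each iteration of the loop in `utils/run_all.py` boils down to a single `subprocess.run` call per repository. This is only a sketch: it assumes the empty `-u` slot in `command_base` is filled with the current URL by the loop body that falls outside the hunk shown, and the repository URL below is a hypothetical example, not one from the project.

```python
import subprocess

# Hypothetical entry from arquivos.txt (the file holds one URL per line).
url = 'https://github.com/example/processor.git'

# Equivalent of command_base with the '-u' slot filled in for this URL.
command = ['proxychains', 'python', 'config_generator.py', '-u', url, '-c', '-a']

# Same 180-second timeout as TIMEOUT_SECONDS; check=True raises
# CalledProcessError on a non-zero exit, mirroring the script's handling.
subprocess.run(command, timeout=180, check=True)
```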