
Enhance cognition progress calculation by adding new metrics and adjusting weights; implement profiling for performance monitoring and optimize memory usage; update unit tests to reflect changes
kasinadhsarma committed Dec 26, 2024
1 parent a621223 commit 3d3bba1
Showing 4 changed files with 133 additions and 12 deletions.
Binary file modified models/__pycache__/consciousness_model.cpython-310.pyc
Binary file not shown.
40 changes: 32 additions & 8 deletions models/consciousness_model.py
@@ -6,6 +6,7 @@
from typing import Any, Dict, List, Tuple
import torch.nn.functional as F
import logging
import time # Added for profiling

from .attention import GlobalWorkspace
from .memory import WorkingMemory, InformationIntegration
@@ -211,20 +212,26 @@ def calculate_cognition_progress(self, metrics):
"""
Calculate the percentage of cognition achieved based on multiple metrics.
"""
start_time = time.time() # Start profiling

scores = {
'phi': metrics.get('phi', 0),
'coherence': metrics.get('coherence', 0),
'stability': metrics.get('stability', 0),
'adaptability': metrics.get('adaptability', 0),
'memory_retention': metrics.get('memory_retention', 0)
'memory_retention': metrics.get('memory_retention', 0),
'emotional_coherence': metrics.get('emotional_coherence', 0),
'decision_making_efficiency': metrics.get('decision_making_efficiency', 0)
}

weights = {
'phi': 0.3,
'coherence': 0.2,
'stability': 0.15,
'adaptability': 0.15,
'memory_retention': 0.2
'phi': 0.25,
'coherence': 0.15,
'stability': 0.1,
'adaptability': 0.1,
'memory_retention': 0.15,
'emotional_coherence': 0.15,
'decision_making_efficiency': 0.1
}

weighted_score = sum(weights[k] * scores[k] for k in weights)
@@ -234,6 +241,9 @@
'total': cognition_percentage,
'breakdown': scores
})

end_time = time.time() # End profiling
self.logger.debug(f"calculate_cognition_progress took {end_time - start_time:.6f} seconds")

return cognition_percentage
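
For reference, the adjusted weighting can be reproduced outside the model. A minimal standalone sketch, assuming only the weights shown above (cognition_percentage here is a hypothetical helper, not a method of ConsciousnessModel):

NEW_WEIGHTS = {
    'phi': 0.25, 'coherence': 0.15, 'stability': 0.1, 'adaptability': 0.1,
    'memory_retention': 0.15, 'emotional_coherence': 0.15,
    'decision_making_efficiency': 0.1,
}

def cognition_percentage(metrics: dict) -> float:
    # Missing metrics default to 0, mirroring metrics.get(key, 0) above.
    return 100 * sum(w * metrics.get(k, 0) for k, w in NEW_WEIGHTS.items())

# The five metric values used in the first unit test below give 56.25%.
example = {'phi': 0.7, 'coherence': 0.8, 'stability': 0.65,
           'adaptability': 0.75, 'memory_retention': 0.85}
assert abs(cognition_percentage(example) - 56.25) < 1e-9

The seven weights sum to 1.0, so a perfect score on every metric still maps to exactly 100%.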

@@ -252,7 +262,9 @@ def report_cognition_progress(self):
f"- Thought Coherence: {latest['breakdown']['coherence']*100:.2f}%",
f"- Context Stability: {latest['breakdown']['stability']*100:.2f}%",
f"- Adaptability: {latest['breakdown']['adaptability']*100:.2f}%",
f"- Memory Retention: {latest['breakdown']['memory_retention']*100:.2f}%\n",
f"- Memory Retention: {latest['breakdown']['memory_retention']*100:.2f}%",
f"- Emotional Coherence: {latest['breakdown']['emotional_coherence']*100:.2f}%", # New metric
f"- Decision Making Efficiency: {latest['breakdown']['decision_making_efficiency']*100:.2f}%\n", # New metric
"Areas Needing Improvement:"
]
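
For orientation only: with the metric values used in test_report_cognition_progress_with_new_metrics below, the report lines visible in this hunk would render as follows (the report also contains lines outside this hunk, such as the overall progress and phi entries, which are not reproduced here):

- Thought Coherence: 80.00%
- Context Stability: 65.00%
- Adaptability: 75.00%
- Memory Retention: 85.00%
- Emotional Coherence: 55.00%
- Decision Making Efficiency: 95.00%
Areas Needing Improvement: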

Expand All @@ -266,6 +278,8 @@ def forward(self, inputs, state=None, initial_state=None, deterministic=True, co
"""
Process inputs through consciousness architecture.
"""
start_time = time.time() # Start profiling

try:
# Validate inputs
if not inputs:
@@ -274,7 +288,7 @@ def forward(self, inputs, state=None, initial_state=None, deterministic=True, co
# Validate state if provided
if state is not None:
error_msg = validate_state(state, (inputs[next(iter(inputs))].size(0), self.hidden_dim))
if error_msg:
if (error_msg):
raise ValueError(f"Invalid state: {error_msg}")

# Initialize attention maps dictionary
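
The shape check in the chunk above relies on validate_state, whose implementation is not part of this diff. A plausible minimal sketch, offered only as an assumption about its contract (it returns an error string for a bad state and a falsy value otherwise):

import torch
from typing import Optional, Tuple

def validate_state(state: torch.Tensor, expected_shape: Tuple[int, int]) -> Optional[str]:
    # Returns a message describing the problem, or None when the state is usable.
    if not isinstance(state, torch.Tensor):
        return f"expected a torch.Tensor, got {type(state).__name__}"
    if tuple(state.shape) != expected_shape:
        return f"expected shape {expected_shape}, got {tuple(state.shape)}"
    return None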
@@ -640,6 +654,9 @@ def forward(self, inputs, state=None, initial_state=None, deterministic=True, co
metrics['cognition_progress'] = cognition_progress
self.logger.debug(f"Cognition Progress: {cognition_progress}%")

end_time = time.time() # End profiling
self.logger.debug(f"forward pass took {end_time - start_time:.6f} seconds")

except Exception as e:
self.error_handler.log_error(
"forward_pass_error",
Expand Down Expand Up @@ -689,6 +706,12 @@ def analyze_model_health(self) -> Dict[str, Any]:
"error_rate": len(self.error_handler.error_history[-100:]) / 100 if self.error_handler.error_history else 0
}

def optimize_memory_usage(self):
"""Optimize memory usage by clearing unnecessary histories"""
self.cognition_progress_history = self.cognition_progress_history[-100:] # Keep last 100 entries
self.state_history = self.state_history[-50:] # Keep last 50 states
self.context_history = self.context_history[-50:] # Keep last 50 contexts
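
One plausible way to use the new housekeeping method, shown purely as an assumption about the calling code (this commit does not wire it into any loop), is to invoke it periodically during long runs:

# Hypothetical driver loop; `batches` and the return structure of model(...) are assumed.
for step, batch in enumerate(batches):
    _ = model(batch)
    if step % 100 == 0:
        model.optimize_memory_usage()  # bounds the history lists defined above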

class WorkingMemory(nn.Module):
def __init__(self, input_dim, hidden_dim, dropout_rate):
super().__init__()
@@ -836,6 +859,7 @@ def forward(self, inputs: Dict[str, torch.Tensor], deterministic: bool = True):
class InformationIntegration(nn.Module):
def __init__(self, hidden_dim: int, num_modules: int, dropout_rate: float):
super().__init__()
self.dropout = nn.Dropout(dropout_rate)
# Store modules in a ModuleList
self.module_list = nn.ModuleList([
nn.Sequential(
Binary file not shown.
105 changes: 101 additions & 4 deletions tests/unit/test_cognition_progress.py
@@ -1,4 +1,3 @@

import pytest
import torch
from models.consciousness_model import ConsciousnessModel
@@ -21,7 +20,49 @@ def test_calculate_cognition_progress(self, model):
'memory_retention': 0.85
}
progress = model.calculate_cognition_progress(metrics)
expected = (0.3 * 0.7) + (0.2 * 0.8) + (0.15 * 0.65) + (0.15 * 0.75) + (0.2 * 0.85)
# Updated expected calculation based on new weights
expected = (0.25 * 0.7) + (0.15 * 0.8) + (0.1 * 0.65) + (0.1 * 0.75) + (0.15 * 0.85) + (0.15 * 0) + (0.1 * 0)
assert progress == pytest.approx(expected * 100)
assert len(model.cognition_progress_history) == 1

def test_calculate_cognition_progress_missing_metrics(self, model):
metrics = {
'phi': 0.7,
'coherence': 0.8
# Missing 'stability', 'adaptability', 'memory_retention', 'emotional_coherence', 'decision_making_efficiency'
}
progress = model.calculate_cognition_progress(metrics)
expected = (0.25 * 0.7) + (0.15 * 0.8) + (0.1 * 0) + (0.1 * 0) + (0.15 * 0) + (0.15 * 0) + (0.1 * 0)
assert progress == pytest.approx(expected * 100)
assert len(model.cognition_progress_history) == 1

def test_calculate_cognition_progress_extreme_values(self, model):
metrics = {
'phi': 1.0,
'coherence': 1.0,
'stability': 1.0,
'adaptability': 1.0,
'memory_retention': 1.0,
'emotional_coherence': 1.0,
'decision_making_efficiency': 1.0
}
progress = model.calculate_cognition_progress(metrics)
expected = (0.25 * 1.0) + (0.15 * 1.0) + (0.1 * 1.0) + (0.1 * 1.0) + (0.15 * 1.0) + (0.15 * 1.0) + (0.1 * 1.0)
assert progress == pytest.approx(expected * 100)
assert len(model.cognition_progress_history) == 1

def test_optimize_memory_usage(self, model):
metrics = {
'phi': 1.0,
'coherence': 1.0,
'stability': 1.0,
'adaptability': 1.0,
'memory_retention': 1.0,
'emotional_coherence': 1.0,
'decision_making_efficiency': 1.0
}
progress = model.calculate_cognition_progress(metrics)
expected = (0.25 * 1.0) + (0.15 * 1.0) + (0.1 * 1.0) + (0.1 * 1.0) + (0.15 * 1.0) + (0.15 * 1.0) + (0.1 * 1.0)
assert progress == pytest.approx(expected * 100)
assert len(model.cognition_progress_history) == 1

@@ -39,5 +80,61 @@ def test_report_cognition_progress_with_data(self, model):
}
model.calculate_cognition_progress(metrics)
report = model.report_cognition_progress()
assert "Current Cognition Progress: 75.00%" in report
assert "Areas Needing Improvement:" in report
# Updated expected progress percentage
assert "Current Cognition Progress: 56.25%" in report
assert "Areas Needing Improvement:" in report

def test_report_cognition_progress_with_new_metrics(self, model):
metrics = {
'phi': 0.7,
'coherence': 0.8,
'stability': 0.65,
'adaptability': 0.75,
'memory_retention': 0.85,
'emotional_coherence': 0.55, # Below threshold
'decision_making_efficiency': 0.95
}
model.calculate_cognition_progress(metrics)
report = model.report_cognition_progress()
# Updated expected progress percentage
assert "Current Cognition Progress: 74.00%" in report
assert "Emotional Coherence: 55.00%" in report
assert "Emotional Coherence" in report # Area needing improvement

def test_stress_condition_large_dataset(self, model):
metrics = {
'phi': 0.7,
'coherence': 0.8,
'stability': 0.65,
'adaptability': 0.75,
'memory_retention': 0.85,
'emotional_coherence': 0.9,
'decision_making_efficiency': 0.95
}
# Simulate large history
for _ in range(1000):
model.calculate_cognition_progress(metrics)
assert len(model.cognition_progress_history) == 1000
report = model.report_cognition_progress()
# Updated expected progress percentage
assert "Current Cognition Progress: 79.25%" in report
# Populate cognition_progress_history and histories
metrics = {
'phi': 0.7,
'coherence': 0.8,
'stability': 0.65,
'adaptability': 0.75,
'memory_retention': 0.85,
'emotional_coherence': 0.75,
'decision_making_efficiency': 0.85
}
for _ in range(200):
model.calculate_cognition_progress(metrics)
state = torch.randn(1, model.hidden_dim)
model.state_history.append(state)
model.context_history.append(state)

model.optimize_memory_usage()
assert len(model.cognition_progress_history) == 100
assert len(model.state_history) == 50
assert len(model.context_history) == 50
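
The new tests repeat near-identical metric dictionaries; a parametrized variant could keep inputs and expected percentages side by side. A sketch only, assuming the same model fixture used by the test class above:

import pytest

@pytest.mark.parametrize("metrics, expected_percent", [
    ({'phi': 0.7, 'coherence': 0.8}, 29.5),  # unspecified metrics default to 0
    ({'phi': 1.0, 'coherence': 1.0, 'stability': 1.0, 'adaptability': 1.0,
      'memory_retention': 1.0, 'emotional_coherence': 1.0,
      'decision_making_efficiency': 1.0}, 100.0),
])
def test_cognition_progress_parametrized(model, metrics, expected_percent):
    progress = model.calculate_cognition_progress(metrics)
    assert progress == pytest.approx(expected_percent)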
