Orchestrator changes:
- Force-spawn GAMMA on iteration_limit before abort
- GAMMA.synthesize() creates emergency handoff payload
- loadRecoveryContext() logs "Resuming from {task_id} handoff"
- POST to /api/pipeline/log for resume message visibility
AgentGamma changes:
- Add synthesize() method for emergency abort synthesis
- Merges existing proposals into coherent handoff
- Stores as synthesis_type: "abort_recovery"
Server changes:
- Add POST /api/pipeline/log endpoint for orchestrator logging
- Recovery pipeline properly inherits GAMMA synthesis
Test coverage:
- test_auto_recovery.py: 6 unit tests
- test_e2e_auto_recovery.py: 5 E2E tests
- test_supervisor_recovery.py: 3 supervisor tests
- Success on attempt 2 (recovery works)
- Max failures (3 retries then FAILED)
- Success on attempt 1 (no recovery needed)
Recovery flow:
1. iteration_limit triggers
2. GAMMA force-spawned for emergency synthesis
3. Handoff dumped with GAMMA synthesis
4. Exit code 3 triggers auto-recovery
5. Recovery pipeline loads handoff
6. Logs "Resuming from {prior_pipeline} handoff"
7. Repeat up to 3 times or until success
Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
957 lines
32 KiB
TypeScript
/**
 * Multi-Agent Coordination System - Agent Implementations
 */
import OpenAI from "openai";
|
|
import { $ } from "bun";
|
|
import type {
|
|
AgentRole,
|
|
AgentState,
|
|
AgentMessage,
|
|
TaskDefinition,
|
|
BlackboardEntry,
|
|
ConsensusVote,
|
|
} from "./types";
|
|
import {
|
|
Blackboard,
|
|
MessageBus,
|
|
AgentStateManager,
|
|
SpawnController,
|
|
MetricsCollector,
|
|
} from "./coordination";
|
|
|
|
function now(): string {
|
|
return new Date().toISOString();
|
|
}
|
|
|
|
function generateId(): string {
|
|
return Math.random().toString(36).slice(2, 10) + Date.now().toString(36);
|
|
}
|
|
|
|
/**
 * Fetches a KV-v2 secret from the local Vault instance.
 *
 * Reads the root token from /opt/vault/init-keys.json, queries
 * https://127.0.0.1:8200/v1/secret/data/{path} via curl, and returns the
 * inner `data.data` payload (the actual key/value map).
 *
 * NOTE(review): uses `curl -k` (TLS verification disabled) and the Vault
 * *root* token — acceptable only against a local dev Vault; confirm before
 * any production use.
 *
 * @param path KV path under secret/data/, e.g. "api-keys/openrouter"
 * @returns the secret's key/value map
 */
async function getVaultSecret(path: string): Promise<Record<string, any>> {
  // Root token persisted by `vault operator init` on this host.
  const initKeys = await Bun.file("/opt/vault/init-keys.json").json();
  const token = initKeys.root_token;
  // Bun shell: interpolated values are shell-escaped automatically.
  const result = await $`curl -sk -H "X-Vault-Token: ${token}" https://127.0.0.1:8200/v1/secret/data/${path}`.json();
  // KV-v2 wraps the payload: { data: { data: {...}, metadata: {...} } }.
  return result.data.data;
}
|
|
|
|
// =============================================================================
|
|
// Base Agent Class
|
|
// =============================================================================
|
|
|
|
/**
 * Shared base for all coordination agents (ALPHA / BETA / GAMMA).
 *
 * Wires an agent into the coordination primitives (blackboard, message bus,
 * state manager, metrics), owns the agent's mutable AgentState record, and
 * provides LLM access plus elapsed-time logging. Subclasses implement run()
 * and typically override handleMessage().
 */
abstract class BaseAgent {
  protected role: AgentRole;
  protected taskId: string;
  protected blackboard: Blackboard;
  protected messageBus: MessageBus;
  protected stateManager: AgentStateManager;
  protected metrics: MetricsCollector;
  // Definite assignment (!): constructed in init(), not the constructor,
  // because the API key is fetched asynchronously from Vault.
  protected llm!: OpenAI;
  protected model: string;
  protected state: AgentState;
  protected startTime: number;
  protected log: (msg: string) => void;

  /**
   * @param role   which agent this is; also used as the message-bus address
   * @param taskId id of the task this agent instance is bound to
   * @param model  OpenRouter model slug (default: anthropic/claude-sonnet-4)
   */
  constructor(
    role: AgentRole,
    taskId: string,
    blackboard: Blackboard,
    messageBus: MessageBus,
    stateManager: AgentStateManager,
    metrics: MetricsCollector,
    model: string = "anthropic/claude-sonnet-4"
  ) {
    this.role = role;
    this.taskId = taskId;
    this.blackboard = blackboard;
    this.messageBus = messageBus;
    this.stateManager = stateManager;
    this.metrics = metrics;
    this.model = model;
    this.startTime = Date.now();

    // Fresh per-instance state record; all counters start at zero.
    this.state = {
      agent_id: `${role}-${taskId}`,
      role,
      status: "IDLE",
      current_task: "",
      progress: 0,
      confidence: 0,
      last_activity: now(),
      messages_sent: 0,
      messages_received: 0,
      proposals_made: 0,
      conflicts_detected: 0,
    };

    // Console logger prefixed with elapsed seconds and the agent role.
    this.log = (msg: string) => {
      const elapsed = ((Date.now() - this.startTime) / 1000).toFixed(1);
      console.log(`[${elapsed}s] [${this.role}] ${msg}`);
    };
  }

  /**
   * Async initialization: fetches the OpenRouter API key from Vault, builds
   * the LLM client, subscribes to the message bus under this agent's role,
   * and publishes the initial IDLE state.
   * Must complete before run() or callLLM() is used.
   */
  async init(): Promise<void> {
    const secrets = await getVaultSecret("api-keys/openrouter");
    this.llm = new OpenAI({
      baseURL: "https://openrouter.ai/api/v1",
      apiKey: secrets.api_key,
    });

    // Register message handler
    this.messageBus.onMessage(this.role, (msg) => this.handleMessage(msg));

    await this.updateState({ status: "IDLE" });
    this.log("Initialized");
  }

  /**
   * Merges `partial` into the in-memory state, stamps last_activity, and
   * persists the whole record through the state manager.
   */
  protected async updateState(partial: Partial<AgentState>): Promise<void> {
    Object.assign(this.state, partial);
    this.state.last_activity = now();
    await this.stateManager.updateState(this.state);
  }

  /**
   * Base message handler: counts and logs the incoming message.
   * NOTE(review): messages_received is only persisted on the next
   * updateState() call, not here.
   */
  protected async handleMessage(msg: AgentMessage): Promise<void> {
    this.state.messages_received++;
    this.log(`Received ${msg.type} from ${msg.from}`);

    // Override in subclasses for specific handling
  }

  /**
   * Sends a message via the bus and bumps the sent counter; the empty
   * updateState({}) call flushes the counter and last_activity to storage.
   */
  protected async sendMessage(to: AgentRole | "ALL", type: AgentMessage["type"], payload: any, correlationId?: string): Promise<void> {
    await this.messageBus.send(to, type, payload, correlationId);
    this.state.messages_sent++;
    await this.updateState({});
  }

  /** Writes a value to the shared blackboard, authored as this agent. */
  protected async writeToBlackboard(section: Parameters<Blackboard["write"]>[0], key: string, value: any): Promise<void> {
    await this.blackboard.write(section, key, value, this.role);
    this.log(`Wrote to blackboard: ${section}/${key}`);
  }

  /** Reads a single value from the blackboard; undefined when absent. */
  protected async readFromBlackboard(section: Parameters<Blackboard["read"]>[0], key: string): Promise<any> {
    const entry = await this.blackboard.read(section, key);
    return entry?.value;
  }

  /**
   * Single-turn chat completion against the configured model.
   * Returns the raw text content ("" when the model returned none).
   * Temperature fixed at 0.4 for fairly stable, lightly varied output.
   */
  protected async callLLM(systemPrompt: string, userPrompt: string, maxTokens: number = 2000): Promise<string> {
    const response = await this.llm.chat.completions.create({
      model: this.model,
      messages: [
        { role: "system", content: systemPrompt },
        { role: "user", content: userPrompt },
      ],
      max_tokens: maxTokens,
      temperature: 0.4,
    });
    return response.choices[0].message.content || "";
  }

  /** Main work loop — implemented per agent role. */
  abstract run(task: TaskDefinition): Promise<void>;
}
|
|
|
|
// =============================================================================
|
|
// Agent ALPHA - Research & Analysis Specialist
|
|
// =============================================================================
|
|
|
|
/**
 * Agent ALPHA — research & analysis specialist.
 *
 * Analyzes the task, writes the problem breakdown to the blackboard,
 * generates solution proposals, submits them to BETA for evaluation, and
 * revises proposals based on BETA's feedback until the consensus phase.
 */
export class AgentAlpha extends BaseAgent {
  // Proposals this agent has authored, keyed by proposal id.
  private proposals: Map<string, any> = new Map();

  constructor(
    taskId: string,
    blackboard: Blackboard,
    messageBus: MessageBus,
    stateManager: AgentStateManager,
    metrics: MetricsCollector,
    model?: string
  ) {
    super("ALPHA", taskId, blackboard, messageBus, stateManager, metrics, model);
  }

  /** Routes incoming messages to the role-specific handlers. */
  protected async handleMessage(msg: AgentMessage): Promise<void> {
    await super.handleMessage(msg);

    switch (msg.type) {
      case "FEEDBACK":
        await this.handleFeedback(msg);
        break;
      case "QUERY":
        await this.handleQuery(msg);
        break;
      case "SYNC":
        await this.handleSync(msg);
        break;
    }
  }

  /**
   * Handles BETA's verdict on a proposal. A rejection that carries
   * suggestions triggers a revision; accepted proposals need no action here.
   */
  private async handleFeedback(msg: AgentMessage): Promise<void> {
    const { proposal_id, accepted, reasoning, suggestions } = msg.payload;
    this.log(`Received feedback on proposal ${proposal_id}: ${accepted ? "ACCEPTED" : "REJECTED"}`);

    if (!accepted && suggestions) {
      // Revise proposal based on feedback
      const original = this.proposals.get(proposal_id);
      if (original) {
        await this.reviseProposal(proposal_id, original, suggestions);
      }
    }
  }

  /** Answers another agent's question; response correlates to the query id. */
  private async handleQuery(msg: AgentMessage): Promise<void> {
    const { question, context } = msg.payload;
    this.log(`Answering query from ${msg.from}: ${question.slice(0, 50)}...`);

    const answer = await this.analyzeQuestion(question, context);
    await this.sendMessage(msg.from as AgentRole, "RESPONSE", {
      question,
      answer,
      confidence: answer.confidence,
    }, msg.id);
  }

  /** Replies to a SYNC with this agent's proposals and progress snapshot. */
  private async handleSync(msg: AgentMessage): Promise<void> {
    // Share current state with other agents
    const currentProposals = Array.from(this.proposals.entries());
    await this.sendMessage(msg.from as AgentRole, "RESPONSE", {
      proposals: currentProposals,
      progress: this.state.progress,
      current_task: this.state.current_task,
    }, msg.id);
  }

  /**
   * LLM-backed Q&A. Extracts the first {...} span from the response as
   * JSON; falls back to the raw text with confidence 0.5 on parse failure.
   */
  private async analyzeQuestion(question: string, context: any): Promise<any> {
    const response = await this.callLLM(
      `You are Agent ALPHA, a research and analysis specialist. Answer questions thoroughly and provide confidence scores.
Output JSON: { "answer": "...", "confidence": 0.0-1.0, "supporting_evidence": [...], "uncertainties": [...] }`,
      `Question: ${question}\nContext: ${JSON.stringify(context)}`
    );

    try {
      const match = response.match(/\{[\s\S]*\}/);
      return match ? JSON.parse(match[0]) : { answer: response, confidence: 0.5 };
    } catch {
      return { answer: response, confidence: 0.5 };
    }
  }

  /**
   * Produces a revised proposal via the LLM, stores it under "<id>-rev",
   * publishes it to the blackboard, and re-submits it to BETA flagged as a
   * revision. LLM/parse failures are logged and swallowed (best-effort).
   */
  private async reviseProposal(proposalId: string, original: any, suggestions: string[]): Promise<void> {
    this.log(`Revising proposal ${proposalId} based on feedback`);

    const response = await this.callLLM(
      `You are Agent ALPHA. Revise the proposal based on the feedback suggestions.
Output JSON: { "revised_proposal": {...}, "changes_made": [...], "confidence": 0.0-1.0 }`,
      `Original proposal: ${JSON.stringify(original)}\nSuggestions: ${suggestions.join(", ")}`
    );

    try {
      const match = response.match(/\{[\s\S]*\}/);
      if (match) {
        const revised = JSON.parse(match[0]);
        const newProposalId = proposalId + "-rev";
        this.proposals.set(newProposalId, revised.revised_proposal);
        await this.writeToBlackboard("solutions", newProposalId, revised);
        await this.sendMessage("BETA", "PROPOSAL", {
          proposal_id: newProposalId,
          proposal: revised.revised_proposal,
          is_revision: true,
          original_id: proposalId,
        });
        this.state.proposals_made++;
      }
    } catch (e) {
      this.log(`Failed to revise proposal: ${e}`);
    }
  }

  /**
   * ALPHA's main loop:
   *  1. analyze the problem and publish the analysis,
   *  2. generate proposals and send each to BETA,
   *  3. iterate on BETA feedback (max 5 rounds, progress-gated),
   * then park at progress 0.9 awaiting consensus.
   */
  async run(task: TaskDefinition): Promise<void> {
    await this.updateState({ status: "WORKING", current_task: "Analyzing problem" });
    this.log(`Starting analysis of: ${task.objective}`);

    // Phase 1: Problem Analysis
    await this.writeToBlackboard("problem", "objective", task.objective);
    await this.writeToBlackboard("problem", "constraints", task.constraints);

    const analysis = await this.analyzeProblem(task);
    await this.writeToBlackboard("problem", "analysis", analysis);
    this.log(`Problem analysis complete. Complexity: ${analysis.complexity_score}`);

    await this.updateState({ progress: 0.2 });

    // Phase 2: Generate Initial Proposals
    await this.updateState({ current_task: "Generating solution proposals" });

    const proposals = await this.generateProposals(task, analysis);
    for (const proposal of proposals) {
      const proposalId = generateId();
      this.proposals.set(proposalId, proposal);
      await this.writeToBlackboard("solutions", proposalId, proposal);

      // Send proposal to BETA for evaluation
      await this.sendMessage("BETA", "PROPOSAL", {
        proposal_id: proposalId,
        proposal,
        phase: "initial",
      });
      this.state.proposals_made++;
    }

    this.log(`Generated ${proposals.length} initial proposals`);
    await this.updateState({ progress: 0.5 });

    // Phase 3: Iterative Refinement Loop
    await this.updateState({ current_task: "Refining solutions" });
    let iterations = 0;
    const maxIterations = 5;

    while (iterations < maxIterations && this.state.progress < 0.9) {
      iterations++;
      this.log(`Refinement iteration ${iterations}`);

      // Read feedback from blackboard
      const feedbackEntries = await this.blackboard.readSection("progress");
      // Only BETA's most recent entry drives a revision.
      const latestFeedback = feedbackEntries
        .filter(e => e.author === "BETA")
        .sort((a, b) => new Date(b.timestamp).getTime() - new Date(a.timestamp).getTime())[0];

      if (latestFeedback?.value?.needs_revision) {
        // More work needed
        await this.refineBasedOnFeedback(latestFeedback.value);
      }

      await this.updateState({ progress: Math.min(0.9, this.state.progress + 0.1) });
      await new Promise(r => setTimeout(r, 500)); // Brief pause between iterations
    }

    await this.updateState({ status: "WAITING", current_task: "Awaiting consensus", progress: 0.9 });
    this.log("Analysis phase complete, awaiting consensus");
  }

  /**
   * LLM problem analysis. Returns parsed JSON when the response contains a
   * {...} block; otherwise a default with complexity 0.5 and the raw text
   * as the single challenge.
   */
  private async analyzeProblem(task: TaskDefinition): Promise<any> {
    const response = await this.callLLM(
      `You are Agent ALPHA, a research and analysis specialist. Analyze the problem thoroughly.
Output JSON: {
"complexity_score": 0.0-1.0,
"key_challenges": [...],
"dependencies": [...],
"risks": [...],
"recommended_approach": "...",
"subtask_breakdown": [...]
}`,
      `Objective: ${task.objective}\nConstraints: ${task.constraints.join(", ")}\nSubtasks: ${task.subtasks.map(s => s.description).join(", ")}`
    );

    try {
      const match = response.match(/\{[\s\S]*\}/);
      return match ? JSON.parse(match[0]) : { complexity_score: 0.5, key_challenges: [response] };
    } catch {
      return { complexity_score: 0.5, key_challenges: [response] };
    }
  }

  /**
   * Asks the LLM for 2-3 distinct proposals. Extracts the first [...] span
   * as a JSON array; falls back to a single default proposal wrapping the
   * raw response.
   */
  private async generateProposals(task: TaskDefinition, analysis: any): Promise<any[]> {
    const response = await this.callLLM(
      `You are Agent ALPHA. Generate 2-3 distinct solution proposals based on the analysis.
Output JSON array: [
{ "name": "...", "approach": "...", "steps": [...], "pros": [...], "cons": [...], "confidence": 0.0-1.0 },
...
]`,
      `Task: ${task.objective}\nAnalysis: ${JSON.stringify(analysis)}\nConstraints: ${task.constraints.join(", ")}`
    );

    try {
      const match = response.match(/\[[\s\S]*\]/);
      return match ? JSON.parse(match[0]) : [{ name: "Default", approach: response, steps: [], confidence: 0.5 }];
    } catch {
      return [{ name: "Default", approach: response, steps: [], confidence: 0.5 }];
    }
  }

  /**
   * Applies blackboard feedback: if it names a proposal this agent owns,
   * kick off a revision with the (possibly empty) suggestion list.
   */
  private async refineBasedOnFeedback(feedback: any): Promise<void> {
    const { proposal_id, issues, suggestions } = feedback;
    if (proposal_id && this.proposals.has(proposal_id)) {
      await this.reviseProposal(proposal_id, this.proposals.get(proposal_id), suggestions || []);
    }
  }
}
|
|
|
|
// =============================================================================
|
|
// Agent BETA - Implementation & Synthesis Specialist
|
|
// =============================================================================
|
|
|
|
/**
 * Agent BETA — implementation & synthesis specialist.
 *
 * Evaluates ALPHA's proposals (via messages and by polling the blackboard),
 * sends accept/revise feedback, records consensus votes for accepted
 * proposals, and finally synthesizes the best-scoring proposals into one
 * solution published under "solutions/synthesis".
 */
export class AgentBeta extends BaseAgent {
  // proposal_id -> { proposal, evaluation }; also serves as a seen-set so
  // blackboard polling never re-evaluates the same proposal.
  private evaluatedProposals: Map<string, any> = new Map();

  constructor(
    taskId: string,
    blackboard: Blackboard,
    messageBus: MessageBus,
    stateManager: AgentStateManager,
    metrics: MetricsCollector,
    model?: string
  ) {
    super("BETA", taskId, blackboard, messageBus, stateManager, metrics, model);
  }

  /** Routes incoming messages to the role-specific handlers. */
  protected async handleMessage(msg: AgentMessage): Promise<void> {
    await super.handleMessage(msg);

    switch (msg.type) {
      case "PROPOSAL":
        await this.evaluateProposal(msg);
        break;
      case "QUERY":
        await this.handleQuery(msg);
        break;
      case "SYNC":
        await this.handleSync(msg);
        break;
    }
  }

  /**
   * Evaluates one proposal end-to-end: LLM scoring, blackboard record,
   * FEEDBACK to ALPHA, then either a consensus ACCEPT vote (score >= 0.7
   * and feasibility >= 0.6) or a conflict counter bump.
   */
  private async evaluateProposal(msg: AgentMessage): Promise<void> {
    const { proposal_id, proposal, phase, is_revision } = msg.payload;
    this.log(`Evaluating proposal ${proposal_id} (${is_revision ? "revision" : phase})`);

    await this.updateState({ current_task: `Evaluating proposal ${proposal_id}` });

    const evaluation = await this.performEvaluation(proposal);
    this.evaluatedProposals.set(proposal_id, { proposal, evaluation });

    // Write evaluation to blackboard
    await this.writeToBlackboard("progress", `eval_${proposal_id}`, {
      proposal_id,
      evaluation,
      evaluator: this.role,
      timestamp: now(),
    });

    // Send feedback to ALPHA
    const accepted = evaluation.score >= 0.7 && evaluation.feasibility >= 0.6;
    await this.sendMessage("ALPHA", "FEEDBACK", {
      proposal_id,
      accepted,
      score: evaluation.score,
      reasoning: evaluation.reasoning,
      suggestions: evaluation.improvements,
      needs_revision: !accepted,
    }, msg.id);

    if (accepted) {
      this.log(`Proposal ${proposal_id} accepted with score ${evaluation.score}`);
      // Cast vote for consensus
      const vote: ConsensusVote = {
        agent: this.role,
        proposal_id,
        vote: "ACCEPT",
        reasoning: evaluation.reasoning,
        timestamp: now(),
      };
      await this.blackboard.recordVote(vote);
    } else {
      this.log(`Proposal ${proposal_id} needs revision. Score: ${evaluation.score}`);
      await this.metrics.increment("conflicts_detected");
      this.state.conflicts_detected++;
    }
  }

  /** Answers another agent's question with an implementation-focused reply. */
  private async handleQuery(msg: AgentMessage): Promise<void> {
    const { question, context } = msg.payload;
    this.log(`Answering query from ${msg.from}`);

    const answer = await this.synthesizeAnswer(question, context);
    await this.sendMessage(msg.from as AgentRole, "RESPONSE", {
      question,
      answer,
    }, msg.id);
  }

  /** Replies to a SYNC with all evaluations so far and current progress. */
  private async handleSync(msg: AgentMessage): Promise<void> {
    const evaluations = Array.from(this.evaluatedProposals.entries());
    await this.sendMessage(msg.from as AgentRole, "RESPONSE", {
      evaluations,
      progress: this.state.progress,
    }, msg.id);
  }

  /**
   * LLM scoring of one proposal. Extracts the first {...} span as JSON;
   * falls back to neutral scores (0.5) with the raw text as reasoning.
   */
  private async performEvaluation(proposal: any): Promise<any> {
    const response = await this.callLLM(
      `You are Agent BETA, an implementation and synthesis specialist. Evaluate this proposal critically.
Output JSON: {
"score": 0.0-1.0,
"feasibility": 0.0-1.0,
"completeness": 0.0-1.0,
"reasoning": "...",
"strengths": [...],
"weaknesses": [...],
"improvements": [...],
"implementation_notes": "..."
}`,
      `Proposal: ${JSON.stringify(proposal)}`
    );

    try {
      const match = response.match(/\{[\s\S]*\}/);
      return match ? JSON.parse(match[0]) : { score: 0.5, feasibility: 0.5, reasoning: response };
    } catch {
      return { score: 0.5, feasibility: 0.5, reasoning: response };
    }
  }

  /** Free-form LLM answer; no JSON contract, raw text returned as-is. */
  private async synthesizeAnswer(question: string, context: any): Promise<any> {
    const response = await this.callLLM(
      `You are Agent BETA. Provide a practical, implementation-focused answer.`,
      `Question: ${question}\nContext: ${JSON.stringify(context)}`
    );
    return { answer: response, source: this.role };
  }

  /**
   * BETA's main loop:
   *  1. read ALPHA's problem analysis (after a short grace delay),
   *  2. poll the blackboard (max 10 rounds) and evaluate any unseen
   *     ALPHA-authored proposals,
   *  3. synthesize proposals scoring >= 0.6 into a final solution and vote,
   * then park at progress 0.9 awaiting consensus.
   */
  async run(task: TaskDefinition): Promise<void> {
    await this.updateState({ status: "WORKING", current_task: "Preparing for evaluation" });
    this.log(`Starting evaluation mode for: ${task.objective}`);

    // Phase 1: Read and understand the problem from blackboard
    await new Promise(r => setTimeout(r, 1000)); // Wait for ALPHA to write problem analysis

    const problemAnalysis = await this.readFromBlackboard("problem", "analysis");
    if (problemAnalysis) {
      this.log(`Read problem analysis: complexity ${problemAnalysis.complexity_score}`);
    }

    await this.updateState({ progress: 0.1 });

    // Phase 2: Active evaluation loop - wait for proposals and evaluate
    await this.updateState({ current_task: "Evaluating proposals" });

    let iterations = 0;
    const maxIterations = 10;

    while (iterations < maxIterations && this.state.progress < 0.9) {
      iterations++;

      // Check for new proposals on blackboard
      const solutions = await this.blackboard.readSection("solutions");
      const newProposals = solutions.filter(s =>
        !this.evaluatedProposals.has(s.key) && s.author === "ALPHA"
      );

      for (const entry of newProposals) {
        // Evaluate via message handling (simulated direct read)
        if (!this.evaluatedProposals.has(entry.key)) {
          await this.evaluateProposal({
            id: generateId(),
            from: "ALPHA",
            to: "BETA",
            type: "PROPOSAL",
            payload: {
              proposal_id: entry.key,
              proposal: entry.value,
              phase: "blackboard_read",
            },
            timestamp: now(),
          });
        }
      }

      await this.updateState({ progress: Math.min(0.9, 0.1 + (iterations * 0.08)) });
      await new Promise(r => setTimeout(r, 500));
    }

    // Phase 3: Generate synthesis of best solutions
    await this.updateState({ current_task: "Synthesizing final solution" });

    // Keep proposals at/above the 0.6 score floor, best first.
    const bestProposals = Array.from(this.evaluatedProposals.entries())
      .filter(([_, v]) => v.evaluation.score >= 0.6)
      .sort((a, b) => b[1].evaluation.score - a[1].evaluation.score);

    if (bestProposals.length > 0) {
      const synthesis = await this.synthesizeSolution(bestProposals.map(([_, v]) => v));
      await this.writeToBlackboard("solutions", "synthesis", synthesis);
      this.log(`Generated synthesis from ${bestProposals.length} proposals`);

      // Vote for synthesis
      const vote: ConsensusVote = {
        agent: this.role,
        proposal_id: "synthesis",
        vote: "ACCEPT",
        reasoning: "Synthesized best elements from top proposals",
        timestamp: now(),
      };
      await this.blackboard.recordVote(vote);
    }

    await this.updateState({ status: "WAITING", current_task: "Awaiting consensus", progress: 0.9 });
    this.log("Evaluation phase complete, awaiting consensus");
  }

  /**
   * Merges the given evaluated proposals into a single final solution via
   * the LLM. Extracts the first {...} span as JSON; falls back to wrapping
   * the raw text with confidence 0.5.
   */
  private async synthesizeSolution(proposals: any[]): Promise<any> {
    const response = await this.callLLM(
      `You are Agent BETA. Synthesize the best elements from these evaluated proposals into a final solution.
Output JSON: {
"final_solution": { "name": "...", "approach": "...", "steps": [...] },
"confidence": 0.0-1.0,
"sources": [...],
"trade_offs": [...]
}`,
      `Proposals and evaluations: ${JSON.stringify(proposals)}`
    );

    try {
      const match = response.match(/\{[\s\S]*\}/);
      return match ? JSON.parse(match[0]) : { final_solution: { approach: response }, confidence: 0.5 };
    } catch {
      return { final_solution: { approach: response }, confidence: 0.5 };
    }
  }
}
|
|
|
|
// =============================================================================
|
|
// Agent GAMMA - Mediator & Resolver (Conditionally Spawned)
|
|
// =============================================================================
|
|
|
|
/**
 * Agent GAMMA — mediator & resolver, spawned only when needed.
 *
 * Spawn reasons drive behavior: "STUCK" (unblock the others), "CONFLICT"
 * (mediate between proposals), "COMPLEXITY" (decompose and delegate),
 * "SUCCESS" (validate completion); anything else gets general mediation.
 * Also provides synthesize() for emergency abort-recovery handoffs.
 */
export class AgentGamma extends BaseAgent {
  // Why the orchestrator spawned this agent; selects the resolution path in run().
  private spawnReason: string;

  constructor(
    taskId: string,
    blackboard: Blackboard,
    messageBus: MessageBus,
    stateManager: AgentStateManager,
    metrics: MetricsCollector,
    spawnReason: string,
    model?: string
  ) {
    super("GAMMA", taskId, blackboard, messageBus, stateManager, metrics, model);
    this.spawnReason = spawnReason;
  }

  /** Routes incoming messages to the role-specific handlers. */
  protected async handleMessage(msg: AgentMessage): Promise<void> {
    await super.handleMessage(msg);

    switch (msg.type) {
      case "QUERY":
        await this.handleQuery(msg);
        break;
      case "HANDOFF":
        await this.handleHandoff(msg);
        break;
    }
  }

  /** Answers a question from another agent in mediator voice. */
  private async handleQuery(msg: AgentMessage): Promise<void> {
    const { question, context } = msg.payload;
    const answer = await this.mediateQuestion(question, context);
    await this.sendMessage(msg.from as AgentRole, "RESPONSE", answer, msg.id);
  }

  /** Logs an incoming handoff (first 100 chars of payload); no further action. */
  private async handleHandoff(msg: AgentMessage): Promise<void> {
    this.log(`Received handoff from ${msg.from}: ${JSON.stringify(msg.payload).slice(0, 100)}...`);
  }

  /** Free-form LLM mediation answer; raw text, no JSON contract. */
  private async mediateQuestion(question: string, context: any): Promise<any> {
    const response = await this.callLLM(
      `You are Agent GAMMA, a mediator and conflict resolver. Provide balanced, integrative answers.`,
      `Question: ${question}\nContext: ${JSON.stringify(context)}`
    );
    return { answer: response, mediator: true };
  }

  /**
   * GAMMA's main loop:
   *  1. announce itself to all agents,
   *  2. snapshot all blackboard sections,
   *  3. dispatch on spawnReason to produce a resolution,
   *  4. drive consensus, record the result, and cast the final vote
   *     (ACCEPT when consensus achieved, otherwise ABSTAIN).
   */
  async run(task: TaskDefinition): Promise<void> {
    await this.updateState({ status: "WORKING", current_task: `Resolving: ${this.spawnReason}` });
    this.log(`GAMMA spawned for: ${this.spawnReason}`);

    // Announce presence
    await this.sendMessage("ALL", "SYNC", {
      event: "GAMMA_SPAWNED",
      reason: this.spawnReason,
      timestamp: now(),
    });

    // Phase 1: Gather current state from all sources
    const problemState = await this.blackboard.readSection("problem");
    const solutionsState = await this.blackboard.readSection("solutions");
    const progressState = await this.blackboard.readSection("progress");
    const conflictsState = await this.blackboard.readSection("conflicts");

    this.log(`Gathered state: ${problemState.length} problem entries, ${solutionsState.length} solutions`);

    await this.updateState({ progress: 0.2 });

    // Phase 2: Analyze situation based on spawn reason
    let resolution: any;

    switch (this.spawnReason) {
      case "STUCK":
        resolution = await this.resolveStuck(task, problemState, solutionsState, progressState);
        break;
      case "CONFLICT":
        resolution = await this.resolveConflict(conflictsState, solutionsState);
        break;
      case "COMPLEXITY":
        resolution = await this.handleComplexity(task, problemState, solutionsState);
        break;
      case "SUCCESS":
        resolution = await this.validateSuccess(task, solutionsState);
        break;
      default:
        resolution = await this.generalMediation(task, problemState, solutionsState);
    }

    await this.writeToBlackboard("progress", "gamma_resolution", resolution);
    await this.updateState({ progress: 0.7 });

    // Phase 3: Drive to consensus
    await this.updateState({ current_task: "Driving consensus" });

    const consensusResult = await this.driveConsensus(resolution);
    await this.writeToBlackboard("consensus", "final", consensusResult);

    // Cast final vote
    const vote: ConsensusVote = {
      agent: this.role,
      proposal_id: "final_consensus",
      vote: consensusResult.achieved ? "ACCEPT" : "ABSTAIN",
      reasoning: consensusResult.reasoning,
      timestamp: now(),
    };
    await this.blackboard.recordVote(vote);

    await this.updateState({ status: "COMPLETED", progress: 1.0, current_task: "Resolution complete" });
    this.log(`GAMMA resolution complete. Consensus: ${consensusResult.achieved}`);
  }

  /**
   * "STUCK" path: asks the LLM to diagnose the blockage and broadcasts the
   * result to all agents as a NEW_DIRECTION handoff. Falls back to the raw
   * text with confidence 0.5 on parse failure (no broadcast in that case).
   */
  private async resolveStuck(task: TaskDefinition, problem: BlackboardEntry[], solutions: BlackboardEntry[], progress: BlackboardEntry[]): Promise<any> {
    this.log("Analyzing stuck condition...");

    const response = await this.callLLM(
      `You are Agent GAMMA, a mediator. The other agents appear stuck. Analyze the situation and provide direction.
Output JSON: {
"diagnosis": "why agents are stuck",
"blockers": [...],
"recommended_actions": [...],
"new_approach": "...",
"confidence": 0.0-1.0
}`,
      `Task: ${task.objective}\nProblem analysis: ${JSON.stringify(problem)}\nSolutions so far: ${JSON.stringify(solutions)}\nProgress: ${JSON.stringify(progress)}`
    );

    try {
      const match = response.match(/\{[\s\S]*\}/);
      const result = match ? JSON.parse(match[0]) : { diagnosis: response, confidence: 0.5 };

      // Broadcast new direction to other agents
      await this.sendMessage("ALL", "HANDOFF", {
        type: "NEW_DIRECTION",
        ...result,
      });

      return result;
    } catch {
      return { diagnosis: response, confidence: 0.5 };
    }
  }

  /**
   * "CONFLICT" path: asks the LLM for a compromise resolution and bumps the
   * conflicts_resolved metric on success.
   */
  private async resolveConflict(conflicts: BlackboardEntry[], solutions: BlackboardEntry[]): Promise<any> {
    this.log("Mediating conflicts...");

    const response = await this.callLLM(
      `You are Agent GAMMA, a conflict mediator. Resolve the conflicts between proposals.
Output JSON: {
"conflicts_analyzed": [...],
"resolution": "...",
"compromise_solution": {...},
"reasoning": "...",
"confidence": 0.0-1.0
}`,
      `Conflicts: ${JSON.stringify(conflicts)}\nSolutions: ${JSON.stringify(solutions)}`
    );

    try {
      const match = response.match(/\{[\s\S]*\}/);
      const result = match ? JSON.parse(match[0]) : { resolution: response, confidence: 0.5 };

      await this.metrics.increment("conflicts_resolved");
      return result;
    } catch {
      return { resolution: response, confidence: 0.5 };
    }
  }

  /**
   * "COMPLEXITY" path: asks the LLM to decompose the task, then delegates
   * any ALPHA/BETA subtask lists via HANDOFF messages.
   */
  private async handleComplexity(task: TaskDefinition, problem: BlackboardEntry[], solutions: BlackboardEntry[]): Promise<any> {
    this.log("Breaking down complexity...");

    const response = await this.callLLM(
      `You are Agent GAMMA. The task is too complex. Break it into manageable pieces.
Output JSON: {
"complexity_analysis": "...",
"decomposition": [...subtasks...],
"priority_order": [...],
"delegation": { "ALPHA": [...], "BETA": [...] },
"confidence": 0.0-1.0
}`,
      `Task: ${task.objective}\nProblem: ${JSON.stringify(problem)}\nCurrent solutions: ${JSON.stringify(solutions)}`
    );

    try {
      const match = response.match(/\{[\s\S]*\}/);
      const result = match ? JSON.parse(match[0]) : { decomposition: [response], confidence: 0.5 };

      // Delegate subtasks
      if (result.delegation) {
        if (result.delegation.ALPHA) {
          await this.sendMessage("ALPHA", "HANDOFF", {
            type: "SUBTASK_ASSIGNMENT",
            tasks: result.delegation.ALPHA,
          });
        }
        if (result.delegation.BETA) {
          await this.sendMessage("BETA", "HANDOFF", {
            type: "SUBTASK_ASSIGNMENT",
            tasks: result.delegation.BETA,
          });
        }
      }

      return result;
    } catch {
      return { decomposition: [response], confidence: 0.5 };
    }
  }

  /**
   * "SUCCESS" path: asks the LLM to check the solutions against the task's
   * success criteria. Defaults to success:false when parsing fails.
   */
  private async validateSuccess(task: TaskDefinition, solutions: BlackboardEntry[]): Promise<any> {
    this.log("Validating task success...");

    const response = await this.callLLM(
      `You are Agent GAMMA. Validate that the task has been successfully completed.
Output JSON: {
"success": true/false,
"criteria_met": [...],
"criteria_unmet": [...],
"final_assessment": "...",
"recommendations": [...],
"confidence": 0.0-1.0
}`,
      `Task: ${task.objective}\nSuccess criteria: ${task.success_criteria.join(", ")}\nSolutions: ${JSON.stringify(solutions)}`
    );

    try {
      const match = response.match(/\{[\s\S]*\}/);
      return match ? JSON.parse(match[0]) : { success: false, final_assessment: response };
    } catch {
      return { success: false, final_assessment: response };
    }
  }

  /** Fallback path for unrecognized spawn reasons: general coordination advice. */
  private async generalMediation(task: TaskDefinition, problem: BlackboardEntry[], solutions: BlackboardEntry[]): Promise<any> {
    this.log("General mediation...");

    const response = await this.callLLM(
      `You are Agent GAMMA, a general mediator. Help coordinate the other agents toward a solution.
Output JSON: {
"assessment": "...",
"recommendations": [...],
"next_steps": [...],
"confidence": 0.0-1.0
}`,
      `Task: ${task.objective}\nProblem: ${JSON.stringify(problem)}\nSolutions: ${JSON.stringify(solutions)}`
    );

    try {
      const match = response.match(/\{[\s\S]*\}/);
      return match ? JSON.parse(match[0]) : { assessment: response, confidence: 0.5 };
    } catch {
      return { assessment: response, confidence: 0.5 };
    }
  }

  /**
   * Combines the recorded "synthesis" votes with the resolution and asks
   * the LLM for a final consensus decision. On parse failure, falls back to
   * a simple majority rule: consensus when at least 2 ACCEPT votes exist.
   */
  private async driveConsensus(resolution: any): Promise<any> {
    this.log("Driving to consensus...");

    // Check existing votes
    const synthesisVotes = await this.blackboard.getVotes("synthesis");
    const acceptVotes = synthesisVotes.filter(v => v.vote === "ACCEPT");

    const response = await this.callLLM(
      `You are Agent GAMMA. Based on the resolution and votes, determine if consensus is achieved.
Output JSON: {
"achieved": true/false,
"reasoning": "...",
"final_decision": "...",
"dissenting_views": [...],
"action_items": [...]
}`,
      `Resolution: ${JSON.stringify(resolution)}\nVotes: ${JSON.stringify(synthesisVotes)}\nAccept count: ${acceptVotes.length}`
    );

    try {
      const match = response.match(/\{[\s\S]*\}/);
      return match ? JSON.parse(match[0]) : { achieved: acceptVotes.length >= 2, reasoning: response };
    } catch {
      return { achieved: acceptVotes.length >= 2, reasoning: response };
    }
  }

  /**
   * Quick synthesis for abort recovery - synthesizes existing proposals into a handoff payload.
   * Called when iteration_limit is hit to preserve work for the next run.
   *
   * Returns { synthesized: false, reason } when there is nothing to merge,
   * { synthesized: true, ...result } on success, or
   * { synthesized: false, error } when the LLM call itself fails
   * (state is set to FAILED in that case).
   */
  async synthesize(task: TaskDefinition): Promise<any> {
    this.log("ABORT SYNTHESIS: Creating handoff payload from existing proposals...");

    await this.updateState({ status: "WORKING", current_task: "Emergency synthesis for handoff" });

    // Gather all current work
    // NOTE(review): reads a "synthesis" section in addition to the
    // "solutions"/"problem" sections used elsewhere — confirm Blackboard
    // declares it.
    const solutions = await this.blackboard.readSection("solutions");
    const synthesis = await this.blackboard.readSection("synthesis");
    const problem = await this.blackboard.readSection("problem");

    if (solutions.length === 0) {
      this.log("No proposals to synthesize");
      return { synthesized: false, reason: "no_proposals" };
    }

    this.log(`Synthesizing ${solutions.length} proposals...`);

    try {
      const response = await this.callLLM(
        `You are Agent GAMMA performing emergency synthesis before abort. Quickly merge the best ideas from all proposals into a coherent handoff document.
Output JSON: {
"merged_solution": "comprehensive merged solution incorporating best elements",
"key_insights": ["insight1", "insight2", ...],
"unresolved_issues": ["issue1", "issue2", ...],
"recommended_focus": "what the next run should prioritize",
"confidence": 0.0-1.0
}`,
        `Task: ${task.objective}\nProposals: ${JSON.stringify(solutions.map(s => ({ author: s.author, value: s.value })))}\nPrior synthesis attempts: ${JSON.stringify(synthesis.map(s => s.value))}`
      );

      let result;
      try {
        const match = response.match(/\{[\s\S]*\}/);
        result = match ? JSON.parse(match[0]) : { merged_solution: response, confidence: 0.5 };
      } catch {
        result = { merged_solution: response, confidence: 0.5 };
      }

      // Store the synthesis result for handoff
      await this.writeToBlackboard("synthesis", "gamma_emergency_synthesis", {
        ...result,
        synthesis_type: "abort_recovery",
        timestamp: now(),
        proposals_merged: solutions.length
      });

      this.log(`Synthesis complete: ${result.key_insights?.length || 0} insights, confidence ${result.confidence}`);

      await this.updateState({ status: "COMPLETED", progress: 1.0 });

      return { synthesized: true, ...result };

    } catch (e: any) {
      this.log(`Synthesis failed: ${e.message}`);
      await this.updateState({ status: "FAILED" });
      return { synthesized: false, error: e.message };
    }
  }
}
|
|
|
|
export { BaseAgent };
|