feat(llm): get rid of unused code

This commit is contained in:
perf3ct 2025-10-10 13:28:03 -07:00
parent 74a2fcdbba
commit 4a239248b1
No known key found for this signature in database
GPG Key ID: 569C4EEC436F5232
27 changed files with 149 additions and 6021 deletions

View File

@ -550,13 +550,9 @@ async function handleStreamingProcess(
const aiServiceManager = await import('../../services/llm/ai_service_manager.js');
await aiServiceManager.default.getOrCreateAnyService();
// Use the chat pipeline directly for streaming
const { ChatPipeline } = await import('../../services/llm/pipeline/chat_pipeline.js');
const pipeline = new ChatPipeline({
enableStreaming: true,
enableMetrics: true,
maxToolCallIterations: 5
});
// Use the V2 pipeline directly for streaming
const pipelineV2Module = await import('../../services/llm/pipeline/pipeline_v2.js');
const pipeline = pipelineV2Module.default;
// Get selected model
const { getSelectedModelConfig } = await import('../../services/llm/config/configuration_helpers.js');

View File

@ -6,8 +6,7 @@ import log from "../../log.js";
import type { Request, Response } from "express";
import type { Message } from "../ai_interface.js";
import aiServiceManager from "../ai_service_manager.js";
import { ChatPipeline } from "../pipeline/chat_pipeline.js";
import type { ChatPipelineInput } from "../pipeline/interfaces.js";
import pipelineV2, { type PipelineV2Input } from "../pipeline/pipeline_v2.js";
import options from "../../options.js";
import { ToolHandler } from "./handlers/tool_handler.js";
import chatStorageService from '../chat_storage_service.js';
@ -113,13 +112,6 @@ class RestChatService {
// Initialize tools
await ToolHandler.ensureToolsInitialized();
// Create and use the chat pipeline
const pipeline = new ChatPipeline({
enableStreaming: req.method === 'GET',
enableMetrics: true,
maxToolCallIterations: 5
});
// Get user's preferred model
const preferredModel = await this.getPreferredModel();
@ -128,7 +120,8 @@ class RestChatService {
systemPrompt: chat.messages.find(m => m.role === 'system')?.content,
model: preferredModel,
stream: !!(req.method === 'GET' || req.query.format === 'stream' || req.query.stream === 'true'),
chatNoteId: chatNoteId
chatNoteId: chatNoteId,
enableTools: true
};
log.info(`Pipeline options: ${JSON.stringify({ useAdvancedContext: pipelineOptions.useAdvancedContext, stream: pipelineOptions.stream })}`);
@ -137,14 +130,13 @@ class RestChatService {
const wsService = await import('../../ws.js');
const accumulatedContentRef = { value: '' };
const pipelineInput: ChatPipelineInput = {
const pipelineInput: PipelineV2Input = {
messages: chat.messages.map(msg => ({
role: msg.role as 'user' | 'assistant' | 'system',
content: msg.content
})),
query: content || '',
noteId: undefined, // TODO: Add context note support if needed
showThinking: showThinking,
options: pipelineOptions,
streamCallback: req.method === 'GET' ? (data, done, rawChunk) => {
this.handleStreamCallback(data, done, rawChunk, wsService.default, chatNoteId, res, accumulatedContentRef, chat);
@ -152,7 +144,7 @@ class RestChatService {
};
// Execute the pipeline
const response = await pipeline.execute(pipelineInput);
const response = await pipelineV2.execute(pipelineInput);
if (req.method === 'POST') {
// Add assistant response to chat

View File

@ -2,10 +2,9 @@ import type { Message, ChatCompletionOptions, ChatResponse } from './ai_interfac
import chatStorageService from './chat_storage_service.js';
import log from '../log.js';
import { CONTEXT_PROMPTS, ERROR_PROMPTS } from './constants/llm_prompt_constants.js';
import { ChatPipeline } from './pipeline/chat_pipeline.js';
import type { ChatPipelineConfig, StreamCallback } from './pipeline/interfaces.js';
import pipelineV2, { type PipelineV2Input } from './pipeline/pipeline_v2.js';
import type { StreamCallback } from './pipeline/interfaces.js';
import aiServiceManager from './ai_service_manager.js';
import type { ChatPipelineInput } from './pipeline/interfaces.js';
import type { NoteSearchResult } from './interfaces/context_interfaces.js';
// Update the ChatCompletionOptions interface to include the missing properties
@ -34,44 +33,14 @@ export interface ChatSession {
options?: ChatCompletionOptions;
}
/**
* Chat pipeline configurations for different use cases
*/
const PIPELINE_CONFIGS: Record<string, Partial<ChatPipelineConfig>> = {
default: {
enableStreaming: true,
enableMetrics: true
},
agent: {
enableStreaming: true,
enableMetrics: true,
maxToolCallIterations: 5
},
performance: {
enableStreaming: false,
enableMetrics: true
}
};
/**
* Service for managing chat interactions and history
*/
export class ChatService {
private sessionCache: Map<string, ChatSession> = new Map();
private pipelines: Map<string, ChatPipeline> = new Map();
constructor() {
// Initialize pipelines
Object.entries(PIPELINE_CONFIGS).forEach(([name, config]) => {
this.pipelines.set(name, new ChatPipeline(config));
});
}
/**
* Get a pipeline by name, or the default one
*/
private getPipeline(name: string = 'default'): ChatPipeline {
return this.pipelines.get(name) || this.pipelines.get('default')!;
// Pipeline V2 is used directly as a singleton, no initialization needed
}
/**
@ -156,17 +125,15 @@ export class ChatService {
// Log message processing
log.info(`Processing message: "${content.substring(0, 100)}..."`);
// Select pipeline to use
const pipeline = this.getPipeline();
// Include sessionId in the options for tool execution tracking
const pipelineOptions = {
...(options || session.options || {}),
sessionId: session.id
sessionId: session.id,
enableTools: options?.enableTools !== false
};
// Execute the pipeline
const response = await pipeline.execute({
const response = await pipelineV2.execute({
messages: session.messages,
options: pipelineOptions,
query: content,
@ -261,26 +228,20 @@ export class ChatService {
log.info(`Processing context-aware message: "${content.substring(0, 100)}..."`);
log.info(`Using context from note: ${noteId}`);
// Get showThinking option if it exists
const showThinking = options?.showThinking === true;
// Select appropriate pipeline based on whether agent tools are needed
const pipelineType = showThinking ? 'agent' : 'default';
const pipeline = this.getPipeline(pipelineType);
// Include sessionId in the options for tool execution tracking
const pipelineOptions = {
...(options || session.options || {}),
sessionId: session.id
sessionId: session.id,
useAdvancedContext: true,
enableTools: options?.enableTools !== false
};
// Execute the pipeline with note context
const response = await pipeline.execute({
const response = await pipelineV2.execute({
messages: session.messages,
options: pipelineOptions,
noteId,
query: content,
showThinking,
streamCallback
});
@ -351,6 +312,9 @@ export class ChatService {
* @param noteId - The ID of the note to add context from
* @param useSmartContext - Whether to use smart context extraction (default: true)
* @returns The updated chat session
*
* @deprecated This method directly accesses legacy pipeline stages.
* Consider using sendContextAwareMessage() instead which uses the V2 pipeline.
*/
async addNoteContext(sessionId: string, noteId: string, useSmartContext = true): Promise<ChatSession> {
const session = await this.getOrCreateSession(sessionId);
@ -359,90 +323,94 @@ export class ChatService {
const lastUserMessage = [...session.messages].reverse()
.find(msg => msg.role === 'user' && msg.content.length > 10)?.content || '';
// Use the context extraction stage from the pipeline
const pipeline = this.getPipeline();
const contextResult = await pipeline.stages.contextExtraction.execute({
noteId,
query: lastUserMessage,
useSmartContext
}) as ContextExtractionResult;
// Use context service directly instead of pipeline stages
try {
const contextService = await import('./context/services/context_service.js');
if (contextService?.default?.findRelevantNotes) {
const results = await contextService.default.findRelevantNotes(lastUserMessage, noteId, {
maxResults: 5,
summarize: true
});
const contextMessage: Message = {
role: 'user',
content: CONTEXT_PROMPTS.NOTE_CONTEXT_PROMPT.replace('{context}', contextResult.context)
};
if (results && results.length > 0) {
const context = results.map(r => `${r.title}: ${r.content}`).join('\n\n');
const contextMessage: Message = {
role: 'user',
content: CONTEXT_PROMPTS.NOTE_CONTEXT_PROMPT.replace('{context}', context)
};
session.messages.push(contextMessage);
session.messages.push(contextMessage);
// Store the context note id in metadata
const metadata = {
contextNoteId: noteId
};
// Store the context note id in metadata
const metadata = { contextNoteId: noteId };
// Check if the context extraction result has sources
if (contextResult.sources && contextResult.sources.length > 0) {
// Convert the sources to match expected format (handling null vs undefined)
const sources = contextResult.sources.map(source => ({
noteId: source.noteId,
title: source.title,
similarity: source.similarity,
// Replace null with undefined for content
content: source.content === null ? undefined : source.content
}));
// Convert results to sources format
const sources = results.map(source => ({
noteId: source.noteId,
title: source.title,
similarity: source.similarity,
content: source.content === null ? undefined : source.content
}));
// Store these sources in metadata
await chatStorageService.recordSources(session.id, sources);
await chatStorageService.recordSources(session.id, sources);
await chatStorageService.updateChat(session.id, session.messages, undefined, metadata);
}
}
} catch (error) {
log.error(`Error adding note context: ${error}`);
}
await chatStorageService.updateChat(session.id, session.messages, undefined, metadata);
return session;
}
/**
* Add semantically relevant context from a note based on a specific query
*
* @deprecated This method directly accesses legacy pipeline stages.
* Consider using sendContextAwareMessage() instead which uses the V2 pipeline.
*/
async addSemanticNoteContext(sessionId: string, noteId: string, query: string): Promise<ChatSession> {
const session = await this.getOrCreateSession(sessionId);
// Use the semantic context extraction stage from the pipeline
const pipeline = this.getPipeline();
const contextResult = await pipeline.stages.semanticContextExtraction.execute({
noteId,
query
});
// Use context service directly instead of pipeline stages
try {
const contextService = await import('./context/services/context_service.js');
if (contextService?.default?.findRelevantNotes) {
const results = await contextService.default.findRelevantNotes(query, noteId, {
maxResults: 5,
summarize: true
});
const contextMessage: Message = {
role: 'user',
content: CONTEXT_PROMPTS.SEMANTIC_NOTE_CONTEXT_PROMPT
.replace('{query}', query)
.replace('{context}', contextResult.context)
};
if (results && results.length > 0) {
const context = results.map(r => `${r.title}: ${r.content}`).join('\n\n');
const contextMessage: Message = {
role: 'user',
content: CONTEXT_PROMPTS.SEMANTIC_NOTE_CONTEXT_PROMPT
.replace('{query}', query)
.replace('{context}', context)
};
session.messages.push(contextMessage);
session.messages.push(contextMessage);
// Store the context note id and query in metadata
const metadata = {
contextNoteId: noteId
};
// Store the context note id and query in metadata
const metadata = { contextNoteId: noteId };
// Check if the semantic context extraction result has sources
const contextSources = (contextResult as ContextExtractionResult).sources || [];
if (contextSources && contextSources.length > 0) {
// Convert the sources to the format expected by recordSources
const sources = contextSources.map((source) => ({
noteId: source.noteId,
title: source.title,
similarity: source.similarity,
content: source.content === null ? undefined : source.content
}));
// Convert results to sources format
const sources = results.map(source => ({
noteId: source.noteId,
title: source.title,
similarity: source.similarity,
content: source.content === null ? undefined : source.content
}));
// Store these sources in metadata
await chatStorageService.recordSources(session.id, sources);
await chatStorageService.recordSources(session.id, sources);
await chatStorageService.updateChat(session.id, session.messages, undefined, metadata);
}
}
} catch (error) {
log.error(`Error adding semantic note context: ${error}`);
}
await chatStorageService.updateChat(session.id, session.messages, undefined, metadata);
return session;
}
@ -486,18 +454,22 @@ export class ChatService {
/**
* Get pipeline performance metrics
*
* @deprecated Pipeline V2 uses structured logging instead of metrics.
* Check logs for performance data.
*/
getPipelineMetrics(pipelineType: string = 'default'): unknown {
const pipeline = this.getPipeline(pipelineType);
return pipeline.getMetrics();
getPipelineMetrics(): unknown {
log.warn('getPipelineMetrics() is deprecated. Pipeline V2 uses structured logging.');
return { message: 'Metrics deprecated. Use structured logs instead.' };
}
/**
* Reset pipeline metrics
*
* @deprecated Pipeline V2 uses structured logging instead of metrics.
*/
resetPipelineMetrics(pipelineType: string = 'default'): void {
const pipeline = this.getPipeline(pipelineType);
pipeline.resetMetrics();
resetPipelineMetrics(): void {
log.warn('resetPipelineMetrics() is deprecated. Pipeline V2 uses structured logging.');
}
/**
@ -554,16 +526,18 @@ export class ChatService {
log.info(`Using chat pipeline for advanced context with query: ${query.substring(0, 50)}...`);
// Create a pipeline input with the query and messages
const pipelineInput: ChatPipelineInput = {
const pipelineInput: PipelineV2Input = {
messages,
options,
options: {
...options,
enableTools: options.enableTools !== false
},
query,
noteId: options.noteId
};
// Execute the pipeline
const pipeline = this.getPipeline(options.pipeline);
const response = await pipeline.execute(pipelineInput);
const response = await pipelineV2.execute(pipelineInput);
log.info(`Pipeline execution complete, response contains tools: ${response.tool_calls ? 'yes' : 'no'}`);
if (response.tool_calls) {
log.info(`Tool calls in pipeline response: ${response.tool_calls.length}`);

View File

@ -1,429 +0,0 @@
import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
import { ChatPipeline } from './chat_pipeline.js';
import type { ChatPipelineInput, ChatPipelineConfig } from './interfaces.js';
import type { Message, ChatResponse } from '../ai_interface.js';
// Mock all pipeline stages as classes that can be instantiated
vi.mock('./stages/context_extraction_stage.js', () => {
class MockContextExtractionStage {
execute = vi.fn().mockResolvedValue({});
}
return { ContextExtractionStage: MockContextExtractionStage };
});
vi.mock('./stages/semantic_context_extraction_stage.js', () => {
class MockSemanticContextExtractionStage {
execute = vi.fn().mockResolvedValue({
context: ''
});
}
return { SemanticContextExtractionStage: MockSemanticContextExtractionStage };
});
vi.mock('./stages/agent_tools_context_stage.js', () => {
class MockAgentToolsContextStage {
execute = vi.fn().mockResolvedValue({});
}
return { AgentToolsContextStage: MockAgentToolsContextStage };
});
vi.mock('./stages/message_preparation_stage.js', () => {
class MockMessagePreparationStage {
execute = vi.fn().mockResolvedValue({
messages: [{ role: 'user', content: 'Hello' }]
});
}
return { MessagePreparationStage: MockMessagePreparationStage };
});
vi.mock('./stages/model_selection_stage.js', () => {
class MockModelSelectionStage {
execute = vi.fn().mockResolvedValue({
options: {
provider: 'openai',
model: 'gpt-4',
enableTools: true,
stream: false
}
});
}
return { ModelSelectionStage: MockModelSelectionStage };
});
vi.mock('./stages/llm_completion_stage.js', () => {
class MockLLMCompletionStage {
execute = vi.fn().mockResolvedValue({
response: {
text: 'Hello! How can I help you?',
role: 'assistant',
finish_reason: 'stop'
}
});
}
return { LLMCompletionStage: MockLLMCompletionStage };
});
vi.mock('./stages/response_processing_stage.js', () => {
class MockResponseProcessingStage {
execute = vi.fn().mockResolvedValue({
text: 'Hello! How can I help you?'
});
}
return { ResponseProcessingStage: MockResponseProcessingStage };
});
vi.mock('./stages/tool_calling_stage.js', () => {
class MockToolCallingStage {
execute = vi.fn().mockResolvedValue({
needsFollowUp: false,
messages: []
});
}
return { ToolCallingStage: MockToolCallingStage };
});
vi.mock('../tools/tool_registry.js', () => ({
default: {
getTools: vi.fn().mockReturnValue([]),
executeTool: vi.fn()
}
}));
vi.mock('../tools/tool_initializer.js', () => ({
default: {
initializeTools: vi.fn().mockResolvedValue(undefined)
}
}));
vi.mock('../ai_service_manager.js', () => ({
default: {
getService: vi.fn().mockReturnValue({
decomposeQuery: vi.fn().mockResolvedValue({
subQueries: [{ text: 'test query' }],
complexity: 3
})
})
}
}));
vi.mock('../context/services/query_processor.js', () => ({
default: {
decomposeQuery: vi.fn().mockResolvedValue({
subQueries: [{ text: 'test query' }],
complexity: 3
})
}
}));
vi.mock('../constants/search_constants.js', () => ({
SEARCH_CONSTANTS: {
TOOL_EXECUTION: {
MAX_TOOL_CALL_ITERATIONS: 5
}
}
}));
vi.mock('../../log.js', () => ({
default: {
info: vi.fn(),
error: vi.fn(),
warn: vi.fn()
}
}));
describe('ChatPipeline', () => {
let pipeline: ChatPipeline;
beforeEach(() => {
vi.clearAllMocks();
pipeline = new ChatPipeline();
});
afterEach(() => {
vi.restoreAllMocks();
});
describe('constructor', () => {
it('should initialize with default configuration', () => {
expect(pipeline.config).toEqual({
enableStreaming: true,
enableMetrics: true,
maxToolCallIterations: 5
});
});
it('should accept custom configuration', () => {
const customConfig: Partial<ChatPipelineConfig> = {
enableStreaming: false,
maxToolCallIterations: 5
};
const customPipeline = new ChatPipeline(customConfig);
expect(customPipeline.config).toEqual({
enableStreaming: false,
enableMetrics: true,
maxToolCallIterations: 5
});
});
it('should initialize all pipeline stages', () => {
expect(pipeline.stages.contextExtraction).toBeDefined();
expect(pipeline.stages.semanticContextExtraction).toBeDefined();
expect(pipeline.stages.agentToolsContext).toBeDefined();
expect(pipeline.stages.messagePreparation).toBeDefined();
expect(pipeline.stages.modelSelection).toBeDefined();
expect(pipeline.stages.llmCompletion).toBeDefined();
expect(pipeline.stages.responseProcessing).toBeDefined();
expect(pipeline.stages.toolCalling).toBeDefined();
});
it('should initialize metrics', () => {
expect(pipeline.metrics).toEqual({
totalExecutions: 0,
averageExecutionTime: 0,
stageMetrics: {
contextExtraction: {
totalExecutions: 0,
averageExecutionTime: 0
},
semanticContextExtraction: {
totalExecutions: 0,
averageExecutionTime: 0
},
agentToolsContext: {
totalExecutions: 0,
averageExecutionTime: 0
},
messagePreparation: {
totalExecutions: 0,
averageExecutionTime: 0
},
modelSelection: {
totalExecutions: 0,
averageExecutionTime: 0
},
llmCompletion: {
totalExecutions: 0,
averageExecutionTime: 0
},
responseProcessing: {
totalExecutions: 0,
averageExecutionTime: 0
},
toolCalling: {
totalExecutions: 0,
averageExecutionTime: 0
}
}
});
});
});
describe('execute', () => {
const messages: Message[] = [
{ role: 'user', content: 'Hello' }
];
const input: ChatPipelineInput = {
query: 'Hello',
messages,
options: {
useAdvancedContext: true // Enable advanced context to trigger full pipeline flow
},
noteId: 'note-123'
};
it('should execute all pipeline stages in order', async () => {
const result = await pipeline.execute(input);
// Get the mock instances from the pipeline stages
expect(pipeline.stages.modelSelection.execute).toHaveBeenCalled();
expect(pipeline.stages.messagePreparation.execute).toHaveBeenCalled();
expect(pipeline.stages.llmCompletion.execute).toHaveBeenCalled();
expect(pipeline.stages.responseProcessing.execute).toHaveBeenCalled();
expect(result).toEqual({
text: 'Hello! How can I help you?',
role: 'assistant',
finish_reason: 'stop'
});
});
it('should increment total executions metric', async () => {
const initialExecutions = pipeline.metrics.totalExecutions;
await pipeline.execute(input);
expect(pipeline.metrics.totalExecutions).toBe(initialExecutions + 1);
});
it('should handle streaming callback', async () => {
const streamCallback = vi.fn();
const inputWithStream = { ...input, streamCallback };
await pipeline.execute(inputWithStream);
expect(pipeline.stages.llmCompletion.execute).toHaveBeenCalled();
});
it('should handle tool calling iterations', async () => {
// Mock LLM response to include tool calls
(pipeline.stages.llmCompletion.execute as any).mockResolvedValue({
response: {
text: 'Hello! How can I help you?',
role: 'assistant',
finish_reason: 'stop',
tool_calls: [{ id: 'tool1', function: { name: 'search', arguments: '{}' } }]
}
});
// Mock tool calling to require iteration then stop
(pipeline.stages.toolCalling.execute as any)
.mockResolvedValueOnce({ needsFollowUp: true, messages: [] })
.mockResolvedValueOnce({ needsFollowUp: false, messages: [] });
await pipeline.execute(input);
expect(pipeline.stages.toolCalling.execute).toHaveBeenCalledTimes(2);
});
it('should respect max tool call iterations', async () => {
// Mock LLM response to include tool calls
(pipeline.stages.llmCompletion.execute as any).mockResolvedValue({
response: {
text: 'Hello! How can I help you?',
role: 'assistant',
finish_reason: 'stop',
tool_calls: [{ id: 'tool1', function: { name: 'search', arguments: '{}' } }]
}
});
// Mock tool calling to always require iteration
(pipeline.stages.toolCalling.execute as any).mockResolvedValue({ needsFollowUp: true, messages: [] });
await pipeline.execute(input);
// Should be called maxToolCallIterations times (5 iterations as configured)
expect(pipeline.stages.toolCalling.execute).toHaveBeenCalledTimes(5);
});
it('should handle stage errors gracefully', async () => {
(pipeline.stages.modelSelection.execute as any).mockRejectedValueOnce(new Error('Model selection failed'));
await expect(pipeline.execute(input)).rejects.toThrow('Model selection failed');
});
it('should pass context between stages', async () => {
await pipeline.execute(input);
// Check that stage was called (the actual context passing is tested in integration)
expect(pipeline.stages.messagePreparation.execute).toHaveBeenCalled();
});
it('should handle empty messages', async () => {
const emptyInput = { ...input, messages: [] };
const result = await pipeline.execute(emptyInput);
expect(result).toBeDefined();
expect(pipeline.stages.modelSelection.execute).toHaveBeenCalled();
});
it('should calculate content length for model selection', async () => {
await pipeline.execute(input);
expect(pipeline.stages.modelSelection.execute).toHaveBeenCalledWith(
expect.objectContaining({
contentLength: expect.any(Number)
})
);
});
it('should update average execution time', async () => {
const initialAverage = pipeline.metrics.averageExecutionTime;
await pipeline.execute(input);
expect(pipeline.metrics.averageExecutionTime).toBeGreaterThanOrEqual(0);
});
it('should disable streaming when config is false', async () => {
const noStreamPipeline = new ChatPipeline({ enableStreaming: false });
await noStreamPipeline.execute(input);
expect(noStreamPipeline.stages.llmCompletion.execute).toHaveBeenCalled();
});
it('should handle concurrent executions', async () => {
const promise1 = pipeline.execute(input);
const promise2 = pipeline.execute(input);
const [result1, result2] = await Promise.all([promise1, promise2]);
expect(result1).toBeDefined();
expect(result2).toBeDefined();
expect(pipeline.metrics.totalExecutions).toBe(2);
});
});
describe('metrics', () => {
const input: ChatPipelineInput = {
query: 'Hello',
messages: [{ role: 'user', content: 'Hello' }],
options: {
useAdvancedContext: true
},
noteId: 'note-123'
};
it('should track stage execution times when metrics enabled', async () => {
await pipeline.execute(input);
expect(pipeline.metrics.stageMetrics.modelSelection.totalExecutions).toBe(1);
expect(pipeline.metrics.stageMetrics.llmCompletion.totalExecutions).toBe(1);
});
it('should skip stage metrics when disabled', async () => {
const noMetricsPipeline = new ChatPipeline({ enableMetrics: false });
await noMetricsPipeline.execute(input);
// Total executions is still tracked, but stage metrics are not updated
expect(noMetricsPipeline.metrics.totalExecutions).toBe(1);
expect(noMetricsPipeline.metrics.stageMetrics.modelSelection.totalExecutions).toBe(0);
expect(noMetricsPipeline.metrics.stageMetrics.llmCompletion.totalExecutions).toBe(0);
});
});
describe('error handling', () => {
const input: ChatPipelineInput = {
query: 'Hello',
messages: [{ role: 'user', content: 'Hello' }],
options: {
useAdvancedContext: true
},
noteId: 'note-123'
};
it('should propagate errors from stages', async () => {
(pipeline.stages.modelSelection.execute as any).mockRejectedValueOnce(new Error('Model selection failed'));
await expect(pipeline.execute(input)).rejects.toThrow('Model selection failed');
});
it('should handle invalid input gracefully', async () => {
const invalidInput = {
query: '',
messages: [],
options: {},
noteId: ''
};
const result = await pipeline.execute(invalidInput);
expect(result).toBeDefined();
});
});
});

View File

@ -1,983 +0,0 @@
import type { ChatPipelineInput, ChatPipelineConfig, PipelineMetrics, StreamCallback } from './interfaces.js';
import type { ChatResponse, StreamChunk, Message } from '../ai_interface.js';
import { ContextExtractionStage } from './stages/context_extraction_stage.js';
import { SemanticContextExtractionStage } from './stages/semantic_context_extraction_stage.js';
import { AgentToolsContextStage } from './stages/agent_tools_context_stage.js';
import { MessagePreparationStage } from './stages/message_preparation_stage.js';
import { ModelSelectionStage } from './stages/model_selection_stage.js';
import { LLMCompletionStage } from './stages/llm_completion_stage.js';
import { ResponseProcessingStage } from './stages/response_processing_stage.js';
import { ToolCallingStage } from './stages/tool_calling_stage.js';
// Traditional search is used instead of vector search
import toolRegistry from '../tools/tool_registry.js';
import toolInitializer from '../tools/tool_initializer.js';
import log from '../../log.js';
import type { LLMServiceInterface } from '../interfaces/agent_tool_interfaces.js';
import { SEARCH_CONSTANTS } from '../constants/search_constants.js';
/**
* Pipeline for managing the entire chat flow
* Implements a modular, composable architecture where each stage is a separate component
*/
export class ChatPipeline {
stages: {
contextExtraction: ContextExtractionStage;
semanticContextExtraction: SemanticContextExtractionStage;
agentToolsContext: AgentToolsContextStage;
messagePreparation: MessagePreparationStage;
modelSelection: ModelSelectionStage;
llmCompletion: LLMCompletionStage;
responseProcessing: ResponseProcessingStage;
toolCalling: ToolCallingStage;
// traditional search is used instead of vector search
};
config: ChatPipelineConfig;
metrics: PipelineMetrics;
/**
* Create a new chat pipeline
* @param config Optional pipeline configuration
*/
constructor(config?: Partial<ChatPipelineConfig>) {
// Initialize all pipeline stages
this.stages = {
contextExtraction: new ContextExtractionStage(),
semanticContextExtraction: new SemanticContextExtractionStage(),
agentToolsContext: new AgentToolsContextStage(),
messagePreparation: new MessagePreparationStage(),
modelSelection: new ModelSelectionStage(),
llmCompletion: new LLMCompletionStage(),
responseProcessing: new ResponseProcessingStage(),
toolCalling: new ToolCallingStage(),
// traditional search is used instead of vector search
};
// Set default configuration values
this.config = {
enableStreaming: true,
enableMetrics: true,
maxToolCallIterations: SEARCH_CONSTANTS.TOOL_EXECUTION.MAX_TOOL_CALL_ITERATIONS,
...config
};
// Initialize metrics
this.metrics = {
totalExecutions: 0,
averageExecutionTime: 0,
stageMetrics: {}
};
// Initialize stage metrics
Object.keys(this.stages).forEach(stageName => {
this.metrics.stageMetrics[stageName] = {
totalExecutions: 0,
averageExecutionTime: 0
};
});
}
/**
* Execute the chat pipeline
* This is the main entry point that orchestrates all pipeline stages
*/
async execute(input: ChatPipelineInput): Promise<ChatResponse> {
log.info(`========== STARTING CHAT PIPELINE ==========`);
log.info(`Executing chat pipeline with ${input.messages.length} messages`);
const startTime = Date.now();
this.metrics.totalExecutions++;
// Initialize streaming handler if requested
let streamCallback = input.streamCallback;
let accumulatedText = '';
try {
// Extract content length for model selection
let contentLength = 0;
for (const message of input.messages) {
contentLength += message.content.length;
}
// Initialize tools if needed
try {
const toolCount = toolRegistry.getAllTools().length;
// If there are no tools registered, initialize them
if (toolCount === 0) {
log.info('No tools found in registry, initializing tools...');
// Tools are already initialized in the AIServiceManager constructor
// No need to initialize them again
log.info(`Tools initialized, now have ${toolRegistry.getAllTools().length} tools`);
} else {
log.info(`Found ${toolCount} tools already registered`);
}
} catch (error: any) {
log.error(`Error checking/initializing tools: ${error.message || String(error)}`);
}
// First, select the appropriate model based on query complexity and content length
const modelSelectionStartTime = Date.now();
log.info(`========== MODEL SELECTION ==========`);
const modelSelection = await this.stages.modelSelection.execute({
options: input.options,
query: input.query,
contentLength
});
this.updateStageMetrics('modelSelection', modelSelectionStartTime);
log.info(`Selected model: ${modelSelection.options.model || 'default'}, enableTools: ${modelSelection.options.enableTools}`);
// Determine if we should use tools or semantic context
const useTools = modelSelection.options.enableTools === true;
const useEnhancedContext = input.options?.useAdvancedContext === true;
// Log details about the advanced context parameter
log.info(`Enhanced context option check: input.options=${JSON.stringify(input.options || {})}`);
log.info(`Enhanced context decision: useEnhancedContext=${useEnhancedContext}, hasQuery=${!!input.query}`);
// Early return if we don't have a query or enhanced context is disabled
if (!input.query || !useEnhancedContext) {
log.info(`========== SIMPLE QUERY MODE ==========`);
log.info('Enhanced context disabled or no query provided, skipping context enrichment');
// Prepare messages without additional context
const messagePreparationStartTime = Date.now();
const preparedMessages = await this.stages.messagePreparation.execute({
messages: input.messages,
systemPrompt: input.options?.systemPrompt,
options: modelSelection.options
});
this.updateStageMetrics('messagePreparation', messagePreparationStartTime);
// Generate completion using the LLM
const llmStartTime = Date.now();
const completion = await this.stages.llmCompletion.execute({
messages: preparedMessages.messages,
options: modelSelection.options
});
this.updateStageMetrics('llmCompletion', llmStartTime);
return completion.response;
}
// STAGE 1: Start with the user's query
const userQuery = input.query || '';
log.info(`========== STAGE 1: USER QUERY ==========`);
log.info(`Processing query with: question="${userQuery.substring(0, 50)}...", noteId=${input.noteId}, showThinking=${input.showThinking}`);
// STAGE 2: Perform query decomposition using the LLM
log.info(`========== STAGE 2: QUERY DECOMPOSITION ==========`);
log.info('Performing query decomposition to generate effective search queries');
const llmService = await this.getLLMService();
let searchQueries = [userQuery];
if (llmService) {
try {
// Import the query processor and use its decomposeQuery method
const queryProcessor = (await import('../context/services/query_processor.js')).default;
// Use the enhanced query processor with the LLM service
const decomposedQuery = await queryProcessor.decomposeQuery(userQuery, undefined, llmService);
if (decomposedQuery && decomposedQuery.subQueries && decomposedQuery.subQueries.length > 0) {
// Extract search queries from the decomposed query
searchQueries = decomposedQuery.subQueries.map(sq => sq.text);
// Always include the original query if it's not already included
if (!searchQueries.includes(userQuery)) {
searchQueries.unshift(userQuery);
}
log.info(`Query decomposed with complexity ${decomposedQuery.complexity}/10 into ${searchQueries.length} search queries`);
} else {
log.info('Query decomposition returned no sub-queries, using original query');
}
} catch (error: any) {
log.error(`Error in query decomposition: ${error.message || String(error)}`);
}
} else {
log.info('No LLM service available for query decomposition, using original query');
}
// STAGE 3: Vector search has been removed - skip semantic search
const vectorSearchStartTime = Date.now();
log.info(`========== STAGE 3: VECTOR SEARCH (DISABLED) ==========`);
log.info('Vector search has been removed - LLM will rely on tool calls for context');
// Create empty vector search result since vector search is disabled
const vectorSearchResult = {
searchResults: [],
totalResults: 0,
executionTime: Date.now() - vectorSearchStartTime
};
// Skip metrics update for disabled vector search functionality
log.info(`Vector search disabled - using tool-based context extraction instead`);
// Extract context from search results
log.info(`========== SEMANTIC CONTEXT EXTRACTION ==========`);
const semanticContextStartTime = Date.now();
const semanticContext = await this.stages.semanticContextExtraction.execute({
noteId: input.noteId || 'global',
query: userQuery,
messages: input.messages,
searchResults: vectorSearchResult.searchResults
});
const context = semanticContext.context;
this.updateStageMetrics('semanticContextExtraction', semanticContextStartTime);
log.info(`Extracted semantic context (${context.length} chars)`);
// STAGE 4: Prepare messages with context and tool definitions for the LLM
log.info(`========== STAGE 4: MESSAGE PREPARATION ==========`);
const messagePreparationStartTime = Date.now();
const preparedMessages = await this.stages.messagePreparation.execute({
messages: input.messages,
context,
systemPrompt: input.options?.systemPrompt,
options: modelSelection.options
});
this.updateStageMetrics('messagePreparation', messagePreparationStartTime);
log.info(`Prepared ${preparedMessages.messages.length} messages for LLM, tools enabled: ${useTools}`);
// Setup streaming handler if streaming is enabled and callback provided
// Check if streaming should be enabled based on several conditions
const streamEnabledInConfig = this.config.enableStreaming;
const streamFormatRequested = input.format === 'stream';
const streamRequestedInOptions = modelSelection.options.stream === true;
const streamCallbackAvailable = typeof streamCallback === 'function';
log.info(`[ChatPipeline] Request type info - Format: ${input.format || 'not specified'}, Options from pipelineInput: ${JSON.stringify({stream: input.options?.stream})}`);
log.info(`[ChatPipeline] Stream settings - config.enableStreaming: ${streamEnabledInConfig}, format parameter: ${input.format}, modelSelection.options.stream: ${modelSelection.options.stream}, streamCallback available: ${streamCallbackAvailable}`);
// IMPORTANT: Respect the existing stream option but with special handling for callbacks:
// 1. If a stream callback is available, streaming MUST be enabled for it to work
// 2. Otherwise, preserve the original stream setting from input options
// First, determine what the stream value should be based on various factors:
let shouldEnableStream = modelSelection.options.stream;
if (streamCallbackAvailable) {
// If we have a stream callback, we NEED to enable streaming
// This is critical for GET requests with EventSource
shouldEnableStream = true;
log.info(`[ChatPipeline] Stream callback available, enabling streaming`);
} else if (streamRequestedInOptions) {
// Stream was explicitly requested in options, honor that setting
log.info(`[ChatPipeline] Stream explicitly requested in options: ${streamRequestedInOptions}`);
shouldEnableStream = streamRequestedInOptions;
} else if (streamFormatRequested) {
// Format=stream parameter indicates streaming was requested
log.info(`[ChatPipeline] Stream format requested in parameters`);
shouldEnableStream = true;
} else {
// No explicit streaming indicators, use config default
log.info(`[ChatPipeline] No explicit stream settings, using config default: ${streamEnabledInConfig}`);
shouldEnableStream = streamEnabledInConfig;
}
// Set the final stream option
modelSelection.options.stream = shouldEnableStream;
log.info(`[ChatPipeline] Final streaming decision: stream=${shouldEnableStream}, will stream to client=${streamCallbackAvailable && shouldEnableStream}`);
// STAGE 5 & 6: Handle LLM completion and tool execution loop
log.info(`========== STAGE 5: LLM COMPLETION ==========`);
const llmStartTime = Date.now();
const completion = await this.stages.llmCompletion.execute({
messages: preparedMessages.messages,
options: modelSelection.options
});
this.updateStageMetrics('llmCompletion', llmStartTime);
log.info(`Received LLM response from model: ${completion.response.model}, provider: ${completion.response.provider}`);
// Track whether content has been streamed to prevent duplication
let hasStreamedContent = false;
// Handle streaming if enabled and available
// Use shouldEnableStream variable which contains our streaming decision
if (shouldEnableStream && completion.response.stream && streamCallback) {
// Setup stream handler that passes chunks through response processing
await completion.response.stream(async (chunk: StreamChunk) => {
// Process the chunk text
const processedChunk = await this.processStreamChunk(chunk, input.options);
// Accumulate text for final response
accumulatedText += processedChunk.text;
// Forward to callback with original chunk data in case it contains additional information
streamCallback(processedChunk.text, processedChunk.done, chunk);
// Mark that we have streamed content to prevent duplication
hasStreamedContent = true;
});
}
// Process any tool calls in the response
let currentMessages = preparedMessages.messages;
let currentResponse = completion.response;
let toolCallIterations = 0;
const maxToolCallIterations = this.config.maxToolCallIterations;
// Check if tools were enabled in the options
const toolsEnabled = modelSelection.options.enableTools !== false;
// Log decision points for tool execution
log.info(`========== TOOL EXECUTION DECISION ==========`);
log.info(`Tools enabled in options: ${toolsEnabled}`);
log.info(`Response provider: ${currentResponse.provider || 'unknown'}`);
log.info(`Response model: ${currentResponse.model || 'unknown'}`);
// Enhanced tool_calls detection - check both direct property and getter
let hasToolCalls = false;
log.info(`[TOOL CALL DEBUG] Starting tool call detection for provider: ${currentResponse.provider}`);
// Check response object structure
log.info(`[TOOL CALL DEBUG] Response properties: ${Object.keys(currentResponse).join(', ')}`);
// Try to access tool_calls as a property
if ('tool_calls' in currentResponse) {
log.info(`[TOOL CALL DEBUG] tool_calls exists as a direct property`);
log.info(`[TOOL CALL DEBUG] tool_calls type: ${typeof currentResponse.tool_calls}`);
if (currentResponse.tool_calls && Array.isArray(currentResponse.tool_calls)) {
log.info(`[TOOL CALL DEBUG] tool_calls is an array with length: ${currentResponse.tool_calls.length}`);
} else {
log.info(`[TOOL CALL DEBUG] tool_calls is not an array or is empty: ${JSON.stringify(currentResponse.tool_calls)}`);
}
} else {
log.info(`[TOOL CALL DEBUG] tool_calls does not exist as a direct property`);
}
// First check the direct property
if (currentResponse.tool_calls && currentResponse.tool_calls.length > 0) {
hasToolCalls = true;
log.info(`Response has tool_calls property with ${currentResponse.tool_calls.length} tools`);
log.info(`Tool calls details: ${JSON.stringify(currentResponse.tool_calls)}`);
}
// Check if it might be a getter (for dynamic tool_calls collection)
else {
log.info(`[TOOL CALL DEBUG] Direct property check failed, trying getter approach`);
try {
const toolCallsDesc = Object.getOwnPropertyDescriptor(currentResponse, 'tool_calls');
if (toolCallsDesc) {
log.info(`[TOOL CALL DEBUG] Found property descriptor for tool_calls: ${JSON.stringify({
configurable: toolCallsDesc.configurable,
enumerable: toolCallsDesc.enumerable,
hasGetter: !!toolCallsDesc.get,
hasSetter: !!toolCallsDesc.set
})}`);
} else {
log.info(`[TOOL CALL DEBUG] No property descriptor found for tool_calls`);
}
if (toolCallsDesc && typeof toolCallsDesc.get === 'function') {
log.info(`[TOOL CALL DEBUG] Attempting to call the tool_calls getter`);
const dynamicToolCalls = toolCallsDesc.get.call(currentResponse);
log.info(`[TOOL CALL DEBUG] Getter returned: ${JSON.stringify(dynamicToolCalls)}`);
if (dynamicToolCalls && dynamicToolCalls.length > 0) {
hasToolCalls = true;
log.info(`Response has dynamic tool_calls with ${dynamicToolCalls.length} tools`);
log.info(`Dynamic tool calls details: ${JSON.stringify(dynamicToolCalls)}`);
// Ensure property is available for subsequent code
currentResponse.tool_calls = dynamicToolCalls;
log.info(`[TOOL CALL DEBUG] Updated currentResponse.tool_calls with dynamic values`);
} else {
log.info(`[TOOL CALL DEBUG] Getter returned no valid tool calls`);
}
} else {
log.info(`[TOOL CALL DEBUG] No getter function found for tool_calls`);
}
} catch (e: any) {
log.error(`Error checking dynamic tool_calls: ${e}`);
log.error(`[TOOL CALL DEBUG] Error details: ${e.stack || 'No stack trace'}`);
}
}
log.info(`Response has tool_calls: ${hasToolCalls ? 'true' : 'false'}`);
if (hasToolCalls && currentResponse.tool_calls) {
log.info(`[TOOL CALL DEBUG] Final tool_calls that will be used: ${JSON.stringify(currentResponse.tool_calls)}`);
}
// Tool execution loop
if (toolsEnabled && hasToolCalls && currentResponse.tool_calls) {
log.info(`========== STAGE 6: TOOL EXECUTION ==========`);
log.info(`Response contains ${currentResponse.tool_calls.length} tool calls, processing...`);
// Format tool calls for logging
log.info(`========== TOOL CALL DETAILS ==========`);
currentResponse.tool_calls.forEach((toolCall, idx) => {
log.info(`Tool call ${idx + 1}: name=${toolCall.function?.name || 'unknown'}, id=${toolCall.id || 'no-id'}`);
log.info(`Arguments: ${toolCall.function?.arguments || '{}'}`);
});
// Keep track of whether we're in a streaming response
const isStreaming = shouldEnableStream && streamCallback;
let streamingPaused = false;
// If streaming was enabled, send an update to the user
if (isStreaming && streamCallback) {
streamingPaused = true;
// Send a dedicated message with a specific type for tool execution
streamCallback('', false, {
text: '',
done: false,
toolExecution: {
type: 'start',
tool: {
name: 'tool_execution',
arguments: {}
}
}
});
}
while (toolCallIterations < maxToolCallIterations) {
toolCallIterations++;
log.info(`========== TOOL ITERATION ${toolCallIterations}/${maxToolCallIterations} ==========`);
// Create a copy of messages before tool execution
const previousMessages = [...currentMessages];
try {
const toolCallingStartTime = Date.now();
log.info(`========== PIPELINE TOOL EXECUTION FLOW ==========`);
log.info(`About to call toolCalling.execute with ${currentResponse.tool_calls.length} tool calls`);
log.info(`Tool calls being passed to stage: ${JSON.stringify(currentResponse.tool_calls)}`);
const toolCallingResult = await this.stages.toolCalling.execute({
response: currentResponse,
messages: currentMessages,
options: modelSelection.options
});
this.updateStageMetrics('toolCalling', toolCallingStartTime);
log.info(`ToolCalling stage execution complete, got result with needsFollowUp: ${toolCallingResult.needsFollowUp}`);
// Update messages with tool results
currentMessages = toolCallingResult.messages;
// Log the tool results for debugging
const toolResultMessages = currentMessages.filter(
msg => msg.role === 'tool' && !previousMessages.includes(msg)
);
log.info(`========== TOOL EXECUTION RESULTS ==========`);
log.info(`Received ${toolResultMessages.length} tool results`);
toolResultMessages.forEach((msg, idx) => {
log.info(`Tool result ${idx + 1}: tool_call_id=${msg.tool_call_id}, content=${msg.content}`);
log.info(`Tool result status: ${msg.content.startsWith('Error:') ? 'ERROR' : 'SUCCESS'}`);
log.info(`Tool result for: ${this.getToolNameFromToolCallId(currentMessages, msg.tool_call_id || '')}`);
// If streaming, show tool executions to the user
if (isStreaming && streamCallback) {
// For each tool result, format a readable message for the user
const toolName = this.getToolNameFromToolCallId(currentMessages, msg.tool_call_id || '');
// Create a structured tool result message
// The client will receive this structured data and can display it properly
try {
// Parse the result content if it's JSON
let parsedContent = msg.content;
try {
// Check if the content is JSON
if (msg.content.trim().startsWith('{') || msg.content.trim().startsWith('[')) {
parsedContent = JSON.parse(msg.content);
}
} catch (e) {
// If parsing fails, keep the original content
log.info(`Could not parse tool result as JSON: ${e}`);
}
// Send the structured tool result directly so the client has the raw data
streamCallback('', false, {
text: '',
done: false,
toolExecution: {
type: 'complete',
tool: {
name: toolName,
arguments: {}
},
result: parsedContent
}
});
// No longer need to send formatted text version
// The client should use the structured data instead
} catch (err) {
log.error(`Error sending structured tool result: ${err}`);
// Use structured format here too instead of falling back to text format
streamCallback('', false, {
text: '',
done: false,
toolExecution: {
type: 'complete',
tool: {
name: toolName || 'unknown',
arguments: {}
},
result: msg.content
}
});
}
}
});
// Check if we need another LLM completion for tool results
if (toolCallingResult.needsFollowUp) {
log.info(`========== TOOL FOLLOW-UP REQUIRED ==========`);
log.info('Tool execution complete, sending results back to LLM');
// Ensure messages are properly formatted
this.validateToolMessages(currentMessages);
// If streaming, show progress to the user
if (isStreaming && streamCallback) {
streamCallback('', false, {
text: '',
done: false,
toolExecution: {
type: 'update',
tool: {
name: 'tool_processing',
arguments: {}
}
}
});
}
// Extract tool execution status information for Ollama feedback
let toolExecutionStatus;
if (currentResponse.provider === 'Ollama') {
// Collect tool execution status from the tool results
toolExecutionStatus = toolResultMessages.map(msg => {
// Determine if this was a successful tool call
const isError = msg.content.startsWith('Error:');
return {
toolCallId: msg.tool_call_id || '',
name: msg.name || 'unknown',
success: !isError,
result: msg.content,
error: isError ? msg.content.substring(7) : undefined
};
});
log.info(`Created tool execution status for Ollama: ${toolExecutionStatus.length} entries`);
toolExecutionStatus.forEach((status, idx) => {
log.info(`Tool status ${idx + 1}: ${status.name} - ${status.success ? 'success' : 'failed'}`);
});
}
// Generate a new completion with the updated messages
const followUpStartTime = Date.now();
// Log messages being sent to LLM for tool follow-up
log.info(`========== SENDING TOOL RESULTS TO LLM FOR FOLLOW-UP ==========`);
log.info(`Total messages being sent: ${currentMessages.length}`);
// Log the most recent messages (last 3) for clarity
const recentMessages = currentMessages.slice(-3);
recentMessages.forEach((msg, idx) => {
const position = currentMessages.length - recentMessages.length + idx;
log.info(`Message ${position} (${msg.role}): ${msg.content?.substring(0, 100)}${msg.content?.length > 100 ? '...' : ''}`);
if (msg.tool_calls) {
log.info(` Has ${msg.tool_calls.length} tool calls`);
}
if (msg.tool_call_id) {
log.info(` Tool call ID: ${msg.tool_call_id}`);
}
});
log.info(`LLM follow-up request options: ${JSON.stringify({
model: modelSelection.options.model,
enableTools: true,
stream: modelSelection.options.stream,
provider: currentResponse.provider
})}`);
const followUpCompletion = await this.stages.llmCompletion.execute({
messages: currentMessages,
options: {
...modelSelection.options,
// Ensure tool support is still enabled for follow-up requests
enableTools: true,
// Preserve original streaming setting for tool execution follow-ups
stream: modelSelection.options.stream,
// Add tool execution status for Ollama provider
...(currentResponse.provider === 'Ollama' ? { toolExecutionStatus } : {})
}
});
this.updateStageMetrics('llmCompletion', followUpStartTime);
// Log the follow-up response from the LLM
log.info(`========== LLM FOLLOW-UP RESPONSE RECEIVED ==========`);
log.info(`Follow-up response model: ${followUpCompletion.response.model}, provider: ${followUpCompletion.response.provider}`);
log.info(`Follow-up response text: ${followUpCompletion.response.text?.substring(0, 150)}${followUpCompletion.response.text?.length > 150 ? '...' : ''}`);
log.info(`Follow-up contains tool calls: ${!!followUpCompletion.response.tool_calls && followUpCompletion.response.tool_calls.length > 0}`);
if (followUpCompletion.response.tool_calls && followUpCompletion.response.tool_calls.length > 0) {
log.info(`Follow-up has ${followUpCompletion.response.tool_calls.length} new tool calls`);
}
// Update current response for the next iteration
currentResponse = followUpCompletion.response;
// Check if we need to continue the tool calling loop
if (!currentResponse.tool_calls || currentResponse.tool_calls.length === 0) {
log.info(`========== TOOL EXECUTION COMPLETE ==========`);
log.info('No more tool calls, breaking tool execution loop');
break;
} else {
log.info(`========== ADDITIONAL TOOL CALLS DETECTED ==========`);
log.info(`Next iteration has ${currentResponse.tool_calls.length} more tool calls`);
// Log the next set of tool calls
currentResponse.tool_calls.forEach((toolCall, idx) => {
log.info(`Next tool call ${idx + 1}: name=${toolCall.function?.name || 'unknown'}, id=${toolCall.id || 'no-id'}`);
log.info(`Arguments: ${toolCall.function?.arguments || '{}'}`);
});
}
} else {
log.info(`========== TOOL EXECUTION COMPLETE ==========`);
log.info('No follow-up needed, breaking tool execution loop');
break;
}
} catch (error: any) {
log.info(`========== TOOL EXECUTION ERROR ==========`);
log.error(`Error in tool execution: ${error.message || String(error)}`);
// Add error message to the conversation if tool execution fails
currentMessages.push({
role: 'system',
content: `Error executing tool: ${error.message || String(error)}. Please try a different approach.`
});
// If streaming, show error to the user
if (isStreaming && streamCallback) {
streamCallback('', false, {
text: '',
done: false,
toolExecution: {
type: 'error',
tool: {
name: 'unknown',
arguments: {}
},
result: error.message || 'unknown error'
}
});
}
// For Ollama, create tool execution status with the error
let toolExecutionStatus;
if (currentResponse.provider === 'Ollama' && currentResponse.tool_calls) {
// We need to create error statuses for all tool calls that failed
toolExecutionStatus = currentResponse.tool_calls.map(toolCall => {
return {
toolCallId: toolCall.id || '',
name: toolCall.function?.name || 'unknown',
success: false,
result: `Error: ${error.message || 'unknown error'}`,
error: error.message || 'unknown error'
};
});
log.info(`Created error tool execution status for Ollama: ${toolExecutionStatus.length} entries`);
}
// Make a follow-up request to the LLM with the error information
const errorFollowUpCompletion = await this.stages.llmCompletion.execute({
messages: currentMessages,
options: {
...modelSelection.options,
// Preserve streaming for error follow-up
stream: modelSelection.options.stream,
// For Ollama, include tool execution status
...(currentResponse.provider === 'Ollama' ? { toolExecutionStatus } : {})
}
});
// Log the error follow-up response from the LLM
log.info(`========== ERROR FOLLOW-UP RESPONSE RECEIVED ==========`);
log.info(`Error follow-up response model: ${errorFollowUpCompletion.response.model}, provider: ${errorFollowUpCompletion.response.provider}`);
log.info(`Error follow-up response text: ${errorFollowUpCompletion.response.text?.substring(0, 150)}${errorFollowUpCompletion.response.text?.length > 150 ? '...' : ''}`);
log.info(`Error follow-up contains tool calls: ${!!errorFollowUpCompletion.response.tool_calls && errorFollowUpCompletion.response.tool_calls.length > 0}`);
// Update current response and break the tool loop
currentResponse = errorFollowUpCompletion.response;
break;
}
}
if (toolCallIterations >= maxToolCallIterations) {
log.info(`========== MAXIMUM TOOL ITERATIONS REACHED ==========`);
log.error(`Reached maximum tool call iterations (${maxToolCallIterations}), terminating loop`);
// Add a message to inform the LLM that we've reached the limit
currentMessages.push({
role: 'system',
content: `Maximum tool call iterations (${maxToolCallIterations}) reached. Please provide your best response with the information gathered so far.`
});
// If streaming, inform the user about iteration limit
if (isStreaming && streamCallback) {
streamCallback(`[Reached maximum of ${maxToolCallIterations} tool calls. Finalizing response...]\n\n`, false);
}
// For Ollama, create a status about reaching max iterations
let toolExecutionStatus;
if (currentResponse.provider === 'Ollama' && currentResponse.tool_calls) {
// Create a special status message about max iterations
toolExecutionStatus = [
{
toolCallId: 'max-iterations',
name: 'system',
success: false,
result: `Maximum tool call iterations (${maxToolCallIterations}) reached.`,
error: `Reached the maximum number of allowed tool calls (${maxToolCallIterations}). Please provide a final response with the information gathered so far.`
}
];
log.info(`Created max iterations status for Ollama`);
}
// Make a final request to get a summary response
const finalFollowUpCompletion = await this.stages.llmCompletion.execute({
messages: currentMessages,
options: {
...modelSelection.options,
enableTools: false, // Disable tools for the final response
// Preserve streaming setting for max iterations response
stream: modelSelection.options.stream,
// For Ollama, include tool execution status
...(currentResponse.provider === 'Ollama' ? { toolExecutionStatus } : {})
}
});
// Update the current response
currentResponse = finalFollowUpCompletion.response;
}
// If streaming was paused for tool execution, resume it now with the final response
if (isStreaming && streamCallback && streamingPaused) {
// First log for debugging
const responseText = currentResponse.text || "";
log.info(`Resuming streaming with final response: ${responseText.length} chars`);
if (responseText.length > 0 && !hasStreamedContent) {
// Resume streaming with the final response text only if we haven't already streamed content
// This is where we send the definitive done:true signal with the complete content
streamCallback(responseText, true);
log.info(`Sent final response with done=true signal and text content`);
} else if (hasStreamedContent) {
log.info(`Content already streamed, sending done=true signal only after tool execution`);
// Just send the done signal without duplicating content
streamCallback('', true);
} else {
// For Anthropic, sometimes text is empty but response is in stream
if ((currentResponse.provider === 'Anthropic' || currentResponse.provider === 'OpenAI') && currentResponse.stream) {
log.info(`Detected empty response text for ${currentResponse.provider} provider with stream, sending stream content directly`);
// For Anthropic/OpenAI with stream mode, we need to stream the final response
if (currentResponse.stream) {
await currentResponse.stream(async (chunk: StreamChunk) => {
// Process the chunk
const processedChunk = await this.processStreamChunk(chunk, input.options);
// Forward to callback
streamCallback(
processedChunk.text,
processedChunk.done || chunk.done || false,
chunk
);
});
log.info(`Completed streaming final ${currentResponse.provider} response after tool execution`);
}
} else {
// Empty response with done=true as fallback
streamCallback('', true);
log.info(`Sent empty final response with done=true signal`);
}
}
}
} else if (toolsEnabled) {
log.info(`========== NO TOOL CALLS DETECTED ==========`);
log.info(`LLM response did not contain any tool calls, skipping tool execution`);
// Handle streaming for responses without tool calls
if (shouldEnableStream && streamCallback && !hasStreamedContent) {
log.info(`Sending final streaming response without tool calls: ${currentResponse.text.length} chars`);
// Send the final response with done=true to complete the streaming
streamCallback(currentResponse.text, true);
log.info(`Sent final non-tool response with done=true signal`);
} else if (shouldEnableStream && streamCallback && hasStreamedContent) {
log.info(`Content already streamed, sending done=true signal only`);
// Just send the done signal without duplicating content
streamCallback('', true);
}
}
// Process the final response
log.info(`========== FINAL RESPONSE PROCESSING ==========`);
const responseProcessingStartTime = Date.now();
const processedResponse = await this.stages.responseProcessing.execute({
response: currentResponse,
options: modelSelection.options
});
this.updateStageMetrics('responseProcessing', responseProcessingStartTime);
log.info(`Final response processed, returning to user (${processedResponse.text.length} chars)`);
// Return the final response to the user
// The ResponseProcessingStage returns {text}, not {response}
// So we update our currentResponse with the processed text
currentResponse.text = processedResponse.text;
log.info(`========== PIPELINE COMPLETE ==========`);
return currentResponse;
} catch (error: any) {
log.info(`========== PIPELINE ERROR ==========`);
log.error(`Error in chat pipeline: ${error.message || String(error)}`);
throw error;
}
}
/**
 * Resolve the AI service manager lazily and return its current LLM service.
 *
 * @returns the active LLM service, or null when resolution fails
 */
private async getLLMService(): Promise<LLMServiceInterface | null> {
    try {
        // Dynamic import defers loading the service manager until needed.
        const { default: manager } = await import('../ai_service_manager.js');
        return manager.getService();
    } catch (error: any) {
        const reason = error.message || String(error);
        log.error(`Error getting LLM service: ${reason}`);
        return null;
    }
}
/**
* Process a stream chunk through the response processing stage
*/
private async processStreamChunk(chunk: StreamChunk, options?: any): Promise<StreamChunk> {
try {
// Only process non-empty chunks
if (!chunk.text) return chunk;
// Create a minimal response object for the processor
const miniResponse = {
text: chunk.text,
model: 'streaming',
provider: 'streaming'
};
// Process the chunk text
const processed = await this.stages.responseProcessing.execute({
response: miniResponse,
options: options
});
// Return processed chunk
return {
...chunk,
text: processed.text
};
} catch (error) {
// On error, return original chunk
log.error(`Error processing stream chunk: ${error}`);
return chunk;
}
}
/**
 * Record one execution of a pipeline stage, folding its elapsed time into
 * that stage's running average. No-op when metrics collection is disabled
 * or the stage name is unknown (e.g. a stage that has been removed).
 *
 * @param stageName key into this.metrics.stageMetrics
 * @param startTime Date.now() captured just before the stage ran
 */
private updateStageMetrics(stageName: string, startTime: number) {
    if (!this.config.enableMetrics) {
        return;
    }
    const stage = this.metrics.stageMetrics[stageName];
    // Guard against undefined metrics (e.g. for removed stages).
    if (!stage) {
        log.info(`WARNING: Attempted to update metrics for unknown stage: ${stageName}`);
        return;
    }
    const elapsed = Date.now() - startTime;
    // Incremental mean: newAvg = (oldAvg * oldCount + elapsed) / newCount.
    const previousTotal = stage.averageExecutionTime * stage.totalExecutions;
    stage.totalExecutions += 1;
    stage.averageExecutionTime = (previousTotal + elapsed) / stage.totalExecutions;
}
/**
 * Get the current pipeline metrics.
 *
 * Note: this returns the live internal metrics object (not a copy), so
 * callers should treat the result as read-only.
 */
getMetrics(): PipelineMetrics {
    return this.metrics;
}
/**
* Reset pipeline metrics
*/
resetMetrics(): void {
this.metrics.totalExecutions = 0;
this.metrics.averageExecutionTime = 0;
Object.keys(this.metrics.stageMetrics).forEach(stageName => {
this.metrics.stageMetrics[stageName] = {
totalExecutions: 0,
averageExecutionTime: 0
};
});
}
/**
 * Find the tool name for a tool call ID by scanning assistant messages
 * from newest to oldest for a matching entry in their tool_calls.
 *
 * @param messages conversation history to search
 * @param toolCallId the tool call ID to resolve
 * @returns the tool's function name, or 'unknown' when not found
 */
private getToolNameFromToolCallId(messages: Message[], toolCallId: string): string {
    if (!toolCallId) {
        return 'unknown';
    }
    // Walk the history newest-first; the matching call is usually recent.
    for (let index = messages.length - 1; index >= 0; index--) {
        const candidate = messages[index];
        if (candidate.role !== 'assistant' || !candidate.tool_calls) {
            continue;
        }
        const match = candidate.tool_calls.find(tc => tc.id === toolCallId);
        const name = match?.function?.name;
        if (name) {
            return name;
        }
    }
    return 'unknown';
}
/**
 * Normalize tool messages in place so every 'tool'-role message carries a
 * tool_call_id and string content before being sent back to the LLM.
 *
 * @param messages conversation messages, mutated in place
 */
private validateToolMessages(messages: Message[]): void {
    messages.forEach((message, index) => {
        // Only 'tool' messages need normalization.
        if (message.role !== 'tool') {
            return;
        }
        // Synthesize a placeholder ID when one is missing.
        if (!message.tool_call_id) {
            log.info(`Tool message missing tool_call_id, adding placeholder`);
            message.tool_call_id = `tool_${index}`;
        }
        // Tool result content must be a plain string.
        if (typeof message.content !== 'string') {
            log.info(`Tool message content is not a string, converting`);
            try {
                message.content = JSON.stringify(message.content);
            } catch (e) {
                message.content = String(message.content);
            }
        }
    });
}
}

View File

@ -1,305 +0,0 @@
/**
* Pipeline Adapter - Phase 1 Implementation
*
* Provides a unified interface for both legacy and V2 pipelines:
* - Feature flag to switch between pipelines
* - Translates between different input/output formats
* - Enables gradual migration without breaking changes
* - Provides metrics comparison between pipelines
*
* Usage:
* import pipelineAdapter from './pipeline_adapter.js';
* const response = await pipelineAdapter.execute(input);
*
* The adapter automatically selects the appropriate pipeline based on:
* 1. Environment variable: USE_LEGACY_PIPELINE=true/false
* 2. Option in input: useLegacyPipeline: true/false
* 3. Default: V2 pipeline (new architecture)
*/
import type {
Message,
ChatCompletionOptions,
ChatResponse
} from '../ai_interface.js';
import { ChatPipeline } from './chat_pipeline.js';
import pipelineV2, { type PipelineV2Input, type PipelineV2Output } from './pipeline_v2.js';
import { createLogger, LogLevel } from '../utils/structured_logger.js';
import type { ChatPipelineInput } from './interfaces.js';
import options from '../../options.js';
/**
 * Adapter input interface
 *
 * Unified request shape accepted by both pipelines; the adapter translates
 * it into whichever pipeline implementation is selected.
 */
export interface AdapterInput {
    /** Conversation history to send to the LLM. */
    messages: Message[];
    /** Completion options (model, streaming, tools, ...). */
    options?: ChatCompletionOptions;
    /** Note ID providing context for the conversation, if any. */
    noteId?: string;
    /** The user's current query text. */
    query?: string;
    /** Delivery format: 'stream' for chunked output, 'json' for a single payload. */
    format?: 'stream' | 'json';
    /** Invoked per streamed chunk; `done` marks the final call, `chunk` carries raw provider data. */
    streamCallback?: (text: string, done: boolean, chunk?: any) => Promise<void> | void;
    /** Passed through to the pipeline; presumably toggles display of model "thinking" output — confirm against pipeline. */
    showThinking?: boolean;
    /** Correlation ID echoed back in the adapter output. */
    requestId?: string;
    useLegacyPipeline?: boolean; // Override pipeline selection
}
/**
 * Adapter output interface
 *
 * A standard ChatResponse extended with adapter bookkeeping fields.
 */
export interface AdapterOutput extends ChatResponse {
    /** Which pipeline implementation produced this response. */
    pipelineVersion: 'legacy' | 'v2';
    /** Correlation ID copied from the input, when provided. */
    requestId?: string;
    /** Wall-clock execution time in milliseconds. */
    processingTime?: number;
}
/**
 * Pipeline selection strategy
 */
export enum PipelineStrategy {
    /** The original ChatPipeline implementation. */
    LEGACY = 'legacy',
    /** The new V2 pipeline architecture (the default). */
    V2 = 'v2',
    AUTO = 'auto' // Future: could auto-select based on query complexity
}
/**
* Pipeline Adapter Implementation
*/
export class PipelineAdapter {
private logger = createLogger();
private legacyPipeline: ChatPipeline | null = null;
private metrics = {
legacy: { totalExecutions: 0, totalTime: 0 },
v2: { totalExecutions: 0, totalTime: 0 }
};
/**
 * Execute a chat request, routing it to the legacy or V2 pipeline
 * according to the configured selection strategy.
 *
 * @param input unified adapter input
 * @returns the pipeline response, tagged with the pipeline version used
 */
async execute(input: AdapterInput): Promise<AdapterOutput> {
    const strategy = this.selectPipeline(input);
    this.logger.debug('Pipeline adapter executing', {
        strategy,
        messageCount: input.messages.length,
        hasQuery: !!input.query
    });
    return strategy === PipelineStrategy.LEGACY
        ? this.executeLegacy(input)
        : this.executeV2(input);
}
/**
 * Decide which pipeline implementation to run, in priority order:
 * explicit per-request override, USE_LEGACY_PIPELINE environment
 * variable, the 'useLegacyPipeline' option, then the V2 default.
 */
private selectPipeline(input: AdapterInput): PipelineStrategy {
    const toStrategy = (useLegacy: boolean) =>
        useLegacy ? PipelineStrategy.LEGACY : PipelineStrategy.V2;

    // 1. Explicit override on the request wins.
    if (input.useLegacyPipeline !== undefined) {
        return toStrategy(input.useLegacyPipeline);
    }

    // 2. Environment variable override.
    const envVar = process.env.USE_LEGACY_PIPELINE;
    if (envVar !== undefined) {
        return toStrategy(envVar === 'true');
    }

    // 3. Persisted option, when the option exists.
    try {
        const useLegacy = (options as any).getOptionBool('useLegacyPipeline');
        if (useLegacy !== undefined) {
            return toStrategy(useLegacy);
        }
    } catch {
        // Option not defined — fall through to the default.
    }

    // 4. Default to V2 (new architecture).
    return PipelineStrategy.V2;
}
/**
* Execute using legacy pipeline
*/
private async executeLegacy(input: AdapterInput): Promise<AdapterOutput> {
const startTime = Date.now();
try {
// Initialize legacy pipeline if needed
if (!this.legacyPipeline) {
try {
this.legacyPipeline = new ChatPipeline();
} catch (error) {
this.logger.error('Failed to initialize legacy pipeline', error);
throw new Error(
`Legacy pipeline initialization failed: ${error instanceof Error ? error.message : String(error)}`
);
}
}
// Convert adapter input to legacy pipeline input
const legacyInput: ChatPipelineInput = {
messages: input.messages,
options: input.options || {},
noteId: input.noteId,
query: input.query,
format: input.format,
streamCallback: input.streamCallback,
showThinking: input.showThinking
};
// Execute legacy pipeline
const response = await this.legacyPipeline.execute(legacyInput);
// Update metrics
const processingTime = Date.now() - startTime;
this.updateMetrics('legacy', processingTime);
this.logger.info('Legacy pipeline executed', {
duration: processingTime,
responseLength: response.text.length
});
return {
...response,
pipelineVersion: 'legacy',
requestId: input.requestId,
processingTime
};
} catch (error) {
this.logger.error('Legacy pipeline error', error);
throw error;
}
}
/**
* Execute using V2 pipeline
*/
private async executeV2(input: AdapterInput): Promise<AdapterOutput> {
const startTime = Date.now();
try {
// Convert adapter input to V2 pipeline input
const v2Input: PipelineV2Input = {
messages: input.messages,
options: input.options,
noteId: input.noteId,
query: input.query,
streamCallback: input.streamCallback,
requestId: input.requestId
};
// Execute V2 pipeline
const response = await pipelineV2.execute(v2Input);
// Update metrics
const processingTime = Date.now() - startTime;
this.updateMetrics('v2', processingTime);
this.logger.info('V2 pipeline executed', {
duration: processingTime,
responseLength: response.text.length,
stagesExecuted: response.stagesExecuted
});
return {
...response,
pipelineVersion: 'v2',
requestId: response.requestId,
processingTime: response.processingTime
};
} catch (error) {
this.logger.error('V2 pipeline error', error);
throw error;
}
}
/**
* Update metrics
*/
private updateMetrics(pipeline: 'legacy' | 'v2', duration: number): void {
const metric = this.metrics[pipeline];
metric.totalExecutions++;
metric.totalTime += duration;
}
/**
* Get performance metrics
*/
getMetrics(): {
legacy: { executions: number; averageTime: number };
v2: { executions: number; averageTime: number };
improvement: number;
} {
const legacyAvg = this.metrics.legacy.totalExecutions > 0
? this.metrics.legacy.totalTime / this.metrics.legacy.totalExecutions
: 0;
const v2Avg = this.metrics.v2.totalExecutions > 0
? this.metrics.v2.totalTime / this.metrics.v2.totalExecutions
: 0;
const improvement = legacyAvg > 0 && v2Avg > 0
? ((legacyAvg - v2Avg) / legacyAvg * 100)
: 0;
return {
legacy: {
executions: this.metrics.legacy.totalExecutions,
averageTime: legacyAvg
},
v2: {
executions: this.metrics.v2.totalExecutions,
averageTime: v2Avg
},
improvement
};
}
/**
* Reset metrics
*/
resetMetrics(): void {
this.metrics.legacy = { totalExecutions: 0, totalTime: 0 };
this.metrics.v2 = { totalExecutions: 0, totalTime: 0 };
}
/**
* Force specific pipeline for testing
*/
async executeWithPipeline(
input: AdapterInput,
pipeline: PipelineStrategy
): Promise<AdapterOutput> {
const modifiedInput = { ...input, useLegacyPipeline: pipeline === PipelineStrategy.LEGACY };
return this.execute(modifiedInput);
}
}
// Export singleton instance (shared by the convenience functions below)
const pipelineAdapter = new PipelineAdapter();
export default pipelineAdapter;
/**
 * Convenience wrappers around the shared pipeline adapter singleton.
 */
export async function executePipeline(input: AdapterInput): Promise<AdapterOutput> {
    return await pipelineAdapter.execute(input);
}

/** Run the request through the legacy pipeline regardless of configuration. */
export async function executeLegacyPipeline(input: AdapterInput): Promise<AdapterOutput> {
    return await pipelineAdapter.executeWithPipeline(input, PipelineStrategy.LEGACY);
}

/** Run the request through the V2 pipeline regardless of configuration. */
export async function executeV2Pipeline(input: AdapterInput): Promise<AdapterOutput> {
    return await pipelineAdapter.executeWithPipeline(input, PipelineStrategy.V2);
}

/** Snapshot of the adapter's cumulative per-pipeline metrics. */
export function getPipelineMetrics() {
    return pipelineAdapter.getMetrics();
}

View File

@ -1,60 +0,0 @@
import { BasePipelineStage } from '../pipeline_stage.js';
import type { PipelineInput } from '../interfaces.js';
import aiServiceManager from '../../ai_service_manager.js';
import log from '../../../log.js';
/** Input for the agent tools context stage. */
export interface AgentToolsContextInput {
    /** Note to scope the context to; defaults to 'global' when omitted. */
    noteId?: string;
    /** User query text; defaults to an empty string when omitted. */
    query?: string;
    /** Whether to include the agent's "thinking" output. */
    showThinking?: boolean;
}
/** Output of the agent tools context stage. */
export interface AgentToolsContextOutput {
    /** Generated agent tools context string. */
    context: string;
    /** Note ID actually used (input value, or 'global'). */
    noteId: string;
    /** Query actually used (input value, or ''). */
    query: string;
}
/**
 * Pipeline stage for adding LLM agent tools context
 */
export class AgentToolsContextStage {
    constructor() {
        log.info('AgentToolsContextStage initialized');
    }

    /** Run the stage (thin wrapper over the processing step). */
    async execute(input: AgentToolsContextInput): Promise<AgentToolsContextOutput> {
        return this.process(input);
    }

    /** Resolve defaults and fetch the agent tools context from the AI service manager. */
    protected async process(input: AgentToolsContextInput): Promise<AgentToolsContextOutput> {
        const resolvedNoteId = input.noteId || 'global';
        const resolvedQuery = input.query || '';
        const thinking = Boolean(input.showThinking);

        log.info(`AgentToolsContextStage: Getting agent tools context for noteId=${resolvedNoteId}, query="${resolvedQuery.substring(0, 30)}...", showThinking=${thinking}`);

        let context: string;
        try {
            // Use the AI service manager to get agent tools context
            context = await aiServiceManager.getAgentToolsContext(resolvedNoteId, resolvedQuery, thinking);
        } catch (error: unknown) {
            const message = error instanceof Error ? error.message : String(error);
            log.error(`AgentToolsContextStage: Error getting agent tools context: ${message}`);
            throw error;
        }

        log.info(`AgentToolsContextStage: Generated agent tools context (${context.length} chars)`);

        return {
            context,
            noteId: resolvedNoteId,
            query: resolvedQuery
        };
    }
}

View File

@ -1,72 +0,0 @@
import { BasePipelineStage } from '../pipeline_stage.js';
import type { ContextExtractionInput } from '../interfaces.js';
import aiServiceManager from '../../ai_service_manager.js';
import log from '../../../log.js';
/**
 * Context Extraction Pipeline Stage
 *
 * Output shape produced by the context extraction stage.
 */
export interface ContextExtractionOutput {
    /** Extracted context text; empty string when no context service is available. */
    context: string;
    /** Note ID the context was extracted for ('global' when none was given). */
    noteId: string;
    /** Query used for extraction ('' when none was given). */
    query: string;
}
/**
 * Pipeline stage for extracting context from notes
 */
export class ContextExtractionStage {
    constructor() {
        log.info('ContextExtractionStage initialized');
    }

    /**
     * Execute the context extraction stage
     */
    async execute(input: ContextExtractionInput): Promise<ContextExtractionOutput> {
        return this.process(input);
    }

    /**
     * Process the input and extract context.
     *
     * Resolves a note ID ('global' when absent) and query, then asks the
     * context service for smart context. Returns an empty context string
     * when no context service is available.
     */
    protected async process(input: ContextExtractionInput): Promise<ContextExtractionOutput> {
        // NOTE: `input.useSmartContext` was previously destructured here with a
        // default of true but never read; removed as dead code.
        const noteId = input.noteId || 'global';
        const query = input.query || '';

        log.info(`ContextExtractionStage: Extracting context for noteId=${noteId}, query="${query.substring(0, 30)}..."`);

        try {
            let context = '';

            // Get enhanced context from the context service
            const contextService = aiServiceManager.getContextService();
            const llmService = await aiServiceManager.getService();

            if (contextService) {
                // Use unified context service to get smart context
                const result = await contextService.processQuery(
                    query,
                    llmService,
                    { contextNoteId: noteId }
                );
                context = result.context;

                log.info(`ContextExtractionStage: Generated enhanced context (${context.length} chars)`);
            } else {
                log.info('ContextExtractionStage: Context service not available, using default context');
            }

            return {
                context,
                noteId,
                query
            };
        } catch (error: unknown) {
            const errorMessage = error instanceof Error ? error.message : String(error);
            log.error(`ContextExtractionStage: Error extracting context: ${errorMessage}`);
            throw error;
        }
    }
}

View File

@ -1,206 +0,0 @@
import { BasePipelineStage } from '../pipeline_stage.js';
import type { LLMCompletionInput } from '../interfaces.js';
import type { ChatCompletionOptions, ChatResponse, StreamChunk } from '../../ai_interface.js';
import aiServiceManager from '../../ai_service_manager.js';
import toolRegistry from '../../tools/tool_registry.js';
import log from '../../../log.js';
/**
* Pipeline stage for LLM completion with enhanced streaming support
*/
export class LLMCompletionStage extends BasePipelineStage<LLMCompletionInput, { response: ChatResponse }> {
constructor() {
super('LLMCompletion');
}
/**
* Generate LLM completion using the AI service
*
* This enhanced version supports better streaming by forwarding raw provider data
* and ensuring consistent handling of stream options.
*/
protected async process(input: LLMCompletionInput): Promise<{ response: ChatResponse }> {
const { messages, options } = input;
// Add detailed logging about the input messages, particularly useful for tool follow-ups
log.info(`========== LLM COMPLETION STAGE - INPUT MESSAGES ==========`);
log.info(`Total input messages: ${messages.length}`);
// Log if tool messages are present (used for follow-ups)
const toolMessages = messages.filter(m => m.role === 'tool');
if (toolMessages.length > 0) {
log.info(`Contains ${toolMessages.length} tool result messages - likely a tool follow-up request`);
}
// Log the last few messages to understand conversation context
const lastMessages = messages.slice(-3);
lastMessages.forEach((msg, idx) => {
const msgPosition = messages.length - lastMessages.length + idx;
log.info(`Message ${msgPosition} (${msg.role}): ${msg.content?.substring(0, 150)}${msg.content?.length > 150 ? '...' : ''}`);
if (msg.tool_calls) {
log.info(` Contains ${msg.tool_calls.length} tool calls`);
}
if (msg.tool_call_id) {
log.info(` Tool call ID: ${msg.tool_call_id}`);
}
});
// Log completion options
log.info(`LLM completion options: ${JSON.stringify({
model: options.model || 'default',
temperature: options.temperature,
enableTools: options.enableTools,
stream: options.stream,
hasToolExecutionStatus: !!options.toolExecutionStatus
})}`);
// Create a deep copy of options to avoid modifying the original
const updatedOptions: ChatCompletionOptions = JSON.parse(JSON.stringify(options));
// Handle stream option explicitly
if (options.stream !== undefined) {
updatedOptions.stream = options.stream === true;
log.info(`[LLMCompletionStage] Stream explicitly set to: ${updatedOptions.stream}`);
}
// Add capture of raw provider data for streaming
if (updatedOptions.stream) {
// Add a function to capture raw provider data in stream chunks
const originalStreamCallback = updatedOptions.streamCallback;
updatedOptions.streamCallback = async (text, done, rawProviderData) => {
// Create an enhanced chunk with the raw provider data
const enhancedChunk = {
text,
done,
// Include raw provider data if available
raw: rawProviderData
};
// Call the original callback if provided
if (originalStreamCallback) {
return originalStreamCallback(text, done, enhancedChunk);
}
};
}
// Check if tools should be enabled
if (updatedOptions.enableTools !== false) {
const toolDefinitions = toolRegistry.getAllToolDefinitions();
if (toolDefinitions.length > 0) {
updatedOptions.enableTools = true;
updatedOptions.tools = toolDefinitions;
log.info(`Adding ${toolDefinitions.length} tools to LLM request`);
}
}
// Determine which provider to use
let selectedProvider = '';
if (updatedOptions.providerMetadata?.provider) {
selectedProvider = updatedOptions.providerMetadata.provider;
log.info(`Using provider ${selectedProvider} from metadata for model ${updatedOptions.model}`);
}
log.info(`Generating LLM completion, provider: ${selectedProvider || 'auto'}, model: ${updatedOptions?.model || 'default'}`);
// Use specific provider if available
if (selectedProvider && aiServiceManager.isProviderAvailable(selectedProvider)) {
const service = await aiServiceManager.getService(selectedProvider);
log.info(`[LLMCompletionStage] Using specific service for ${selectedProvider}`);
// Generate completion and wrap with enhanced stream handling
const response = await service.generateChatCompletion(messages, updatedOptions);
// If streaming is enabled, enhance the stream method
if (response.stream && typeof response.stream === 'function' && updatedOptions.stream) {
const originalStream = response.stream;
// Replace the stream method with an enhanced version that captures and forwards raw data
response.stream = async (callback) => {
return originalStream(async (chunk) => {
// Forward the chunk with any additional provider-specific data
// Create an enhanced chunk with provider info
const enhancedChunk: StreamChunk = {
...chunk,
// If the provider didn't include raw data, add minimal info
raw: chunk.raw || {
provider: selectedProvider,
model: response.model
}
};
return callback(enhancedChunk);
});
};
}
// Add enhanced logging for debugging tool execution follow-ups
if (toolMessages.length > 0) {
if (response.tool_calls && response.tool_calls.length > 0) {
log.info(`Response contains ${response.tool_calls.length} tool calls`);
response.tool_calls.forEach((toolCall: any, idx: number) => {
log.info(`Tool call ${idx + 1}: ${toolCall.function?.name || 'unnamed'}`);
const args = typeof toolCall.function?.arguments === 'string'
? toolCall.function?.arguments
: JSON.stringify(toolCall.function?.arguments);
log.info(`Arguments: ${args?.substring(0, 100) || '{}'}`);
});
} else {
log.info(`Response contains no tool calls - plain text response`);
}
if (toolMessages.length > 0 && !response.tool_calls) {
log.info(`This appears to be a final response after tool execution (no new tool calls)`);
} else if (toolMessages.length > 0 && response.tool_calls && response.tool_calls.length > 0) {
log.info(`This appears to be a continued tool execution flow (tools followed by more tools)`);
}
}
return { response };
}
// Use auto-selection if no specific provider
log.info(`[LLMCompletionStage] Using auto-selected service`);
const response = await aiServiceManager.generateChatCompletion(messages, updatedOptions);
// Add similar stream enhancement for auto-selected provider
if (response.stream && typeof response.stream === 'function' && updatedOptions.stream) {
const originalStream = response.stream;
response.stream = async (callback) => {
return originalStream(async (chunk) => {
// Create an enhanced chunk with provider info
const enhancedChunk: StreamChunk = {
...chunk,
raw: chunk.raw || {
provider: response.provider,
model: response.model
}
};
return callback(enhancedChunk);
});
};
}
// Add enhanced logging for debugging tool execution follow-ups
if (toolMessages.length > 0) {
if (response.tool_calls && response.tool_calls.length > 0) {
log.info(`Response contains ${response.tool_calls.length} tool calls`);
response.tool_calls.forEach((toolCall: any, idx: number) => {
log.info(`Tool call ${idx + 1}: ${toolCall.function?.name || 'unnamed'}`);
const args = typeof toolCall.function?.arguments === 'string'
? toolCall.function?.arguments
: JSON.stringify(toolCall.function?.arguments);
log.info(`Arguments: ${args?.substring(0, 100) || '{}'}`);
});
} else {
log.info(`Response contains no tool calls - plain text response`);
}
if (toolMessages.length > 0 && !response.tool_calls) {
log.info(`This appears to be a final response after tool execution (no new tool calls)`);
} else if (toolMessages.length > 0 && response.tool_calls && response.tool_calls.length > 0) {
log.info(`This appears to be a continued tool execution flow (tools followed by more tools)`);
}
}
return { response };
}
}

View File

@ -1,63 +0,0 @@
import { BasePipelineStage } from '../pipeline_stage.js';
import type { MessagePreparationInput } from '../interfaces.js';
import type { Message } from '../../ai_interface.js';
import { SYSTEM_PROMPTS } from '../../constants/llm_prompt_constants.js';
import { MessageFormatterFactory } from '../interfaces/message_formatter.js';
import toolRegistry from '../../tools/tool_registry.js';
import log from '../../../log.js';
/**
 * Pipeline stage for preparing messages for LLM completion
 */
export class MessagePreparationStage extends BasePipelineStage<MessagePreparationInput, { messages: Message[] }> {
    constructor() {
        super('MessagePreparation');
    }

    /**
     * Prepare messages for LLM completion, including system prompt and context.
     * Uses provider-specific formatters to optimize the message structure.
     */
    protected async process(input: MessagePreparationInput): Promise<{ messages: Message[] }> {
        const { messages, context, systemPrompt, options } = input;

        // Derive the provider from a "provider:model" model string when present.
        const modelString = options?.model;
        let provider = 'default';
        if (modelString && modelString.includes(':')) {
            provider = modelString.split(':')[0];
        }

        const toolsEnabled = options?.enableTools === true;

        log.info(`Preparing messages for provider: ${provider}, context: ${!!context}, system prompt: ${!!systemPrompt}, tools: ${toolsEnabled}`);

        // Get appropriate formatter for this provider
        const formatter = MessageFormatterFactory.getFormatter(provider);

        // Start from the supplied system prompt, falling back to the default.
        let finalSystemPrompt = systemPrompt || SYSTEM_PROMPTS.DEFAULT_SYSTEM_PROMPT;

        // If tools are enabled, enhance system prompt with tools guidance.
        if (toolsEnabled) {
            const toolCount = toolRegistry.getAllTools().length;
            const toolsPrompt = `You have access to ${toolCount} tools to help you respond. When you need information that might be in the user's notes, use the search_notes tool to find relevant content or the read_note tool to read a specific note by ID. Use tools when specific information is required rather than making assumptions.`;
            finalSystemPrompt = finalSystemPrompt + '\n\n' + toolsPrompt;

            log.info(`Enhanced system prompt with tools guidance: ${toolCount} tools available`);
        }

        // Format messages using the provider-specific approach.
        const formattedMessages = formatter.formatMessages(messages, finalSystemPrompt, context);

        log.info(`Formatted ${messages.length} messages into ${formattedMessages.length} messages for provider: ${provider}`);

        return { messages: formattedMessages };
    }
}

View File

@ -1,229 +0,0 @@
import { BasePipelineStage } from '../pipeline_stage.js';
import type { ModelSelectionInput } from '../interfaces.js';
import type { ChatCompletionOptions } from '../../ai_interface.js';
import type { ModelMetadata } from '../../providers/provider_options.js';
import log from '../../../log.js';
import aiServiceManager from '../../ai_service_manager.js';
import { SEARCH_CONSTANTS, MODEL_CAPABILITIES } from "../../constants/search_constants.js";
// Import types
import type { ServiceProviders } from '../../interfaces/ai_service_interfaces.js';
// Import new configuration system
import {
getSelectedProvider,
parseModelIdentifier,
getDefaultModelForProvider,
createModelConfig
} from '../../config/configuration_helpers.js';
import type { ProviderType } from '../../interfaces/configuration_interfaces.js';
/**
 * Pipeline stage for selecting the appropriate LLM model.
 *
 * An explicitly specified model (optionally "provider:model") wins; otherwise
 * the configured default model for the selected provider is used. Also enables
 * tools by default and attaches provider metadata to the options.
 */
export class ModelSelectionStage extends BasePipelineStage<ModelSelectionInput, { options: ChatCompletionOptions }> {
    constructor() {
        super('ModelSelection');
    }

    /**
     * Select the appropriate model based on input complexity
     */
    protected async process(input: ModelSelectionInput): Promise<{ options: ChatCompletionOptions }> {
        const { options: inputOptions, query, contentLength } = input;

        // Log input options
        log.info(`[ModelSelectionStage] Input options: ${JSON.stringify({
            model: inputOptions?.model,
            stream: inputOptions?.stream,
            enableTools: inputOptions?.enableTools
        })}`);
        log.info(`[ModelSelectionStage] Stream option in input: ${inputOptions?.stream}, type: ${typeof inputOptions?.stream}`);

        // Start with provided options or create a new object
        const updatedOptions: ChatCompletionOptions = { ...(inputOptions || {}) };

        // Preserve the stream option exactly as it was provided, including undefined:
        // this is critical for correct propagation down the pipeline.
        log.info(`[ModelSelectionStage] After copy, stream: ${updatedOptions.stream}, type: ${typeof updatedOptions.stream}`);

        // If model already specified, don't override it
        if (updatedOptions.model) {
            // Use the new configuration system to parse model identifier
            const modelIdentifier = parseModelIdentifier(updatedOptions.model);

            if (modelIdentifier.provider) {
                // Add provider metadata for backward compatibility and strip the
                // "provider:" prefix from the model name.
                this.addProviderMetadata(updatedOptions, modelIdentifier.provider as ServiceProviders, modelIdentifier.modelId);
                updatedOptions.model = modelIdentifier.modelId;
                log.info(`Using explicitly specified model: ${modelIdentifier.modelId} from provider: ${modelIdentifier.provider}`);
            } else {
                log.info(`Using explicitly specified model: ${updatedOptions.model}`);
            }

            log.info(`[ModelSelectionStage] Returning early with stream: ${updatedOptions.stream}`);
            return { options: updatedOptions };
        }

        // Enable tools by default unless explicitly disabled
        updatedOptions.enableTools = updatedOptions.enableTools !== false;

        // Add tools if not already provided
        if (updatedOptions.enableTools && (!updatedOptions.tools || updatedOptions.tools.length === 0)) {
            try {
                // Import tool registry and fetch tool definitions
                const toolRegistry = (await import('../../tools/tool_registry.js')).default;
                const toolDefinitions = toolRegistry.getAllToolDefinitions();

                if (toolDefinitions.length > 0) {
                    updatedOptions.tools = toolDefinitions;
                    log.info(`Added ${toolDefinitions.length} tools to options`);
                } else {
                    // Registry is empty. Tools are initialized in the
                    // AIServiceManager constructor, so just re-read the registry.
                    log.info('No tools found in registry, trying to initialize them');
                    try {
                        const reinitToolDefinitions = toolRegistry.getAllToolDefinitions();
                        updatedOptions.tools = reinitToolDefinitions;
                        log.info(`After initialization, added ${reinitToolDefinitions.length} tools to options`);
                    } catch (initError: any) {
                        log.error(`Failed to initialize tools: ${initError.message}`);
                    }
                }
            } catch (error: any) {
                log.error(`Error loading tools: ${error.message}`);
            }
        }

        // Get selected provider and model using the new configuration system
        try {
            const selectedProvider = await getSelectedProvider();

            if (!selectedProvider) {
                throw new Error('No AI provider is selected. Please select a provider in your AI settings.');
            }

            // First try to get a valid model config (this checks both selection and configuration)
            const { getValidModelConfig } = await import('../../config/configuration_helpers.js');
            const modelConfig = await getValidModelConfig(selectedProvider);

            if (!modelConfig) {
                throw new Error(`No default model configured for provider ${selectedProvider}. Please set a default model in your AI settings.`);
            }

            // Use the configured model
            updatedOptions.model = modelConfig.model;

            log.info(`Selected provider: ${selectedProvider}, model: ${updatedOptions.model}`);

            // Estimate query complexity (currently informational/logging only).
            let queryComplexity = 'low';
            if (query) {
                // Simple heuristic: longer queries or those with complex terms indicate higher complexity
                const complexityIndicators = [
                    'explain', 'analyze', 'compare', 'evaluate', 'synthesize',
                    'summarize', 'elaborate', 'investigate', 'research', 'debate'
                ];

                const hasComplexTerms = complexityIndicators.some(term => query.toLowerCase().includes(term));
                const isLongQuery = query.length > 100;
                const hasMultipleQuestions = (query.match(/\?/g) || []).length > 1;

                if ((hasComplexTerms && isLongQuery) || hasMultipleQuestions) {
                    queryComplexity = 'high';
                } else if (hasComplexTerms || isLongQuery) {
                    queryComplexity = 'medium';
                }
            }

            // For large content, favor more powerful models
            if (contentLength && contentLength > SEARCH_CONSTANTS.CONTEXT.CONTENT_LENGTH.MEDIUM_THRESHOLD) {
                queryComplexity = contentLength > SEARCH_CONSTANTS.CONTEXT.CONTENT_LENGTH.HIGH_THRESHOLD ? 'high' : 'medium';
            }

            // Add provider metadata (model is already set above)
            this.addProviderMetadata(updatedOptions, selectedProvider as ServiceProviders, updatedOptions.model);

            log.info(`Selected model: ${updatedOptions.model} from provider: ${selectedProvider} for query complexity: ${queryComplexity}`);
            log.info(`[ModelSelectionStage] Final options: ${JSON.stringify({
                model: updatedOptions.model,
                stream: updatedOptions.stream,
                provider: selectedProvider,
                enableTools: updatedOptions.enableTools
            })}`);

            return { options: updatedOptions };
        } catch (error) {
            log.error(`Error determining default model: ${error}`);
            throw new Error(`Failed to determine AI model configuration: ${error}`);
        }
    }

    /**
     * Add provider metadata to the options based on model name
     */
    private addProviderMetadata(options: ChatCompletionOptions, provider: ServiceProviders, modelName: string): void {
        // Check if we already have providerMetadata
        if (options.providerMetadata) {
            // If providerMetadata exists but not modelId, backfill the model name.
            if (!options.providerMetadata.modelId && modelName) {
                options.providerMetadata.modelId = modelName;
            }
            return;
        }

        // Use the explicitly provided provider - no automatic fallbacks
        const selectedProvider = provider;

        // Set the provider metadata in the options
        if (selectedProvider) {
            // Ensure the provider is one of the valid types
            const validProvider = selectedProvider as 'openai' | 'anthropic' | 'ollama' | 'local';

            options.providerMetadata = {
                provider: validProvider,
                modelId: modelName
            };

            // For backward compatibility, ensure model name is set without prefix
            if (options.model && options.model.includes(':')) {
                const parsed = parseModelIdentifier(options.model);
                options.model = modelName || parsed.modelId;
            }

            log.info(`Set provider metadata: provider=${selectedProvider}, model=${modelName}`);
        }
    }

    // NOTE(review): the former private getOllamaContextWindow() helper was never
    // called anywhere in this class and has been removed as dead code.
}

View File

@ -1,44 +0,0 @@
import { BasePipelineStage } from '../pipeline_stage.js';
import type { ResponseProcessingInput } from '../interfaces.js';
import type { ChatResponse } from '../../ai_interface.js';
import log from '../../../log.js';
/**
 * Pipeline stage for processing LLM responses
 */
export class ResponseProcessingStage extends BasePipelineStage<ResponseProcessingInput, { text: string }> {
    constructor() {
        super('ResponseProcessing');
    }

    /**
     * Post-process the LLM response text: format any "thinking" section,
     * strip a leading "assistant:" prefix, and log token usage when present.
     */
    protected async process(input: ResponseProcessingInput): Promise<{ text: string }> {
        const { response, options } = input;

        log.info(`Processing LLM response from model: ${response.model}`);

        let text = response.text;

        // When thinking output is requested, surface it as a fenced code block.
        if (options?.showThinking && text.includes('thinking:')) {
            text = this.formatThinkingSection(text);
        }

        // Remove leading "Assistant:" if present
        text = text.replace(/^\s*assistant:\s*/i, '');

        // Log tokens if available for monitoring
        if (response.usage) {
            log.info(`Token usage - prompt: ${response.usage.promptTokens}, completion: ${response.usage.completionTokens}, total: ${response.usage.totalTokens}`);
        }

        return { text };
    }

    /** Rewrite a leading "thinking: ..." section as a fenced Markdown block. */
    private formatThinkingSection(text: string): string {
        const match = text.match(/thinking:(.*?)(?=answer:|$)/s);
        if (!match) {
            return text;
        }
        const thinkingContent = match[1].trim();
        return text.replace(/thinking:.*?(?=answer:|$)/s, `**Thinking:** \n\n\`\`\`\n${thinkingContent}\n\`\`\`\n\n`);
    }
}

View File

@ -1,27 +0,0 @@
import { BasePipelineStage } from '../pipeline_stage.js';
import type { SemanticContextExtractionInput } from '../interfaces.js';
import log from '../../../log.js';
/**
 * Pipeline stage for extracting semantic context from notes.
 * Vector search has been removed, so this stage always yields an empty
 * context and the LLM relies on tool calls for context gathering instead.
 */
export class SemanticContextExtractionStage extends BasePipelineStage<SemanticContextExtractionInput, { context: string }> {
    constructor() {
        super('SemanticContextExtraction');
    }

    /**
     * Always returns an empty context string (vector search removed);
     * logs which note the request was for.
     */
    protected async process(input: SemanticContextExtractionInput): Promise<{ context: string }> {
        const { noteId } = input;
        log.info(`Semantic context extraction disabled - vector search has been removed. Using tool-based context instead for note ${noteId}`);
        return { context: "" };
    }
}

View File

@ -1,681 +0,0 @@
import type { ChatResponse, Message } from '../../ai_interface.js';
import log from '../../../log.js';
import type { StreamCallback, ToolExecutionInput } from '../interfaces.js';
import { BasePipelineStage } from '../pipeline_stage.js';
import toolRegistry from '../../tools/tool_registry.js';
import chatStorageService from '../../chat_storage_service.js';
import aiServiceManager from '../../ai_service_manager.js';
// Type definitions for tools and validation results
/** Minimal runtime interface a tool must expose to be executed by this stage. */
interface ToolInterface {
    /** Execute the tool with already-parsed arguments. */
    execute: (args: Record<string, unknown>) => Promise<unknown>;
    // Tool definition properties (name, description, ...) are spread in as-is.
    [key: string]: unknown;
}
/** Result of validating a single tool call requested by the LLM. */
interface ToolValidationResult {
    /** The tool call as requested by the LLM. */
    toolCall: {
        id?: string;
        function: {
            name: string;
            arguments: string | Record<string, unknown>;
        };
    };
    /** Whether the call references a known tool with usable arguments. */
    valid: boolean;
    /** Resolved tool implementation, or null when validation failed. */
    tool: ToolInterface | null;
    /** Human-readable validation error, or null when valid. */
    error: string | null;
    guidance?: string; // Guidance to help the LLM select better tools/parameters
}
/**
 * Pipeline stage for handling LLM tool calling
 * This stage is responsible for:
 * 1. Detecting tool calls in LLM responses
 * 2. Executing the appropriate tools
 * 3. Adding tool results back to the conversation
 * 4. Determining if we need to make another call to the LLM
 */
export class ToolCallingStage extends BasePipelineStage<ToolExecutionInput, { response: ChatResponse, needsFollowUp: boolean, messages: Message[] }> {
    constructor() {
        super('ToolCalling');
        // Vector search tool has been removed - no preloading needed
    }

    /**
     * Process the LLM response and execute any tool calls.
     *
     * Validates every requested tool, executes them in parallel, appends the
     * assistant message plus one `tool` message per result to the conversation,
     * and reports whether a follow-up LLM call is needed.
     *
     * @param input response/messages plus optional stream callback and options
     * @returns the original response, the extended message list, and the follow-up flag
     */
    protected async process(input: ToolExecutionInput): Promise<{ response: ChatResponse, needsFollowUp: boolean, messages: Message[] }> {
        const { response, messages } = input;
        const streamCallback = input.streamCallback as StreamCallback;

        log.info(`========== TOOL CALLING STAGE ENTRY ==========`);
        log.info(`Response provider: ${response.provider}, model: ${response.model || 'unknown'}`);
        log.info(`LLM requested ${response.tool_calls?.length || 0} tool calls from provider: ${response.provider}`);

        // Check if the response has tool calls
        if (!response.tool_calls || response.tool_calls.length === 0) {
            // No tool calls, return original response and messages
            log.info(`No tool calls detected in response from provider: ${response.provider}`);
            log.info(`===== EXITING TOOL CALLING STAGE: No tool_calls =====`);
            return { response, needsFollowUp: false, messages };
        }

        // Log response details for debugging
        if (response.text) {
            log.info(`Response text: "${response.text.substring(0, 200)}${response.text.length > 200 ? '...' : ''}"`);
        }

        // Check if the registry has any tools
        const registryTools = toolRegistry.getAllTools();

        // Convert ToolHandler[] to ToolInterface[] with proper type safety
        const availableTools: ToolInterface[] = registryTools.map(tool => {
            // Create a proper ToolInterface from the ToolHandler
            const toolInterface: ToolInterface = {
                // Pass through the execute method
                execute: (args: Record<string, unknown>) => tool.execute(args),
                // Include other properties from the tool definition
                ...tool.definition
            };
            return toolInterface;
        });

        log.info(`Available tools in registry: ${availableTools.length}`);

        // Log available tools for debugging
        if (availableTools.length > 0) {
            const availableToolNames = availableTools.map(t => {
                // Safely access the name property using type narrowing
                if (t && typeof t === 'object' && 'definition' in t &&
                    t.definition && typeof t.definition === 'object' &&
                    'function' in t.definition && t.definition.function &&
                    typeof t.definition.function === 'object' &&
                    'name' in t.definition.function &&
                    typeof t.definition.function.name === 'string') {
                    return t.definition.function.name;
                }
                return 'unknown';
            }).join(', ');
            log.info(`Available tools: ${availableToolNames}`);
        }

        if (availableTools.length === 0) {
            log.error(`No tools available in registry, cannot execute tool calls`);
            // Try to initialize tools as a recovery step
            try {
                log.info('Attempting to initialize tools as recovery step');
                // Tools are already initialized in the AIServiceManager constructor
                // No need to initialize them again
                const toolCount = toolRegistry.getAllTools().length;
                log.info(`After recovery initialization: ${toolCount} tools available`);
            } catch (error: unknown) {
                const errorMessage = error instanceof Error ? error.message : String(error);
                log.error(`Failed to initialize tools in recovery step: ${errorMessage}`);
            }
        }

        // Create a copy of messages to add the assistant message with tool calls
        const updatedMessages = [...messages];

        // Add the assistant message with the tool calls
        updatedMessages.push({
            role: 'assistant',
            content: response.text || "",
            tool_calls: response.tool_calls
        });

        // Execute each tool call and add results to messages
        log.info(`========== STARTING TOOL EXECUTION ==========`);
        log.info(`Executing ${response.tool_calls?.length || 0} tool calls in parallel`);
        const executionStartTime = Date.now();

        // First validate all tools before execution
        log.info(`Validating ${response.tool_calls?.length || 0} tools before execution`);
        const validationResults: ToolValidationResult[] = await Promise.all((response.tool_calls || []).map(async (toolCall) => {
            try {
                // Get the tool from registry
                const tool = toolRegistry.getTool(toolCall.function.name);
                if (!tool) {
                    log.error(`Tool not found in registry: ${toolCall.function.name}`);
                    // Generate guidance for the LLM when a tool is not found
                    const guidance = this.generateToolGuidance(toolCall.function.name, `Tool not found: ${toolCall.function.name}`);
                    return {
                        toolCall,
                        valid: false,
                        tool: null,
                        error: `Tool not found: ${toolCall.function.name}`,
                        guidance // Add guidance for the LLM
                    };
                }

                // Validate the tool before execution
                // Use unknown as an intermediate step for type conversion
                const isToolValid = await this.validateToolBeforeExecution(tool as unknown as ToolInterface, toolCall.function.name);
                if (!isToolValid) {
                    throw new Error(`Tool '${toolCall.function.name}' failed validation before execution`);
                }

                return {
                    toolCall,
                    valid: true,
                    tool: tool as unknown as ToolInterface,
                    error: null
                };
            } catch (error: unknown) {
                const errorMessage = error instanceof Error ? error.message : String(error);
                return {
                    toolCall,
                    valid: false,
                    tool: null,
                    error: errorMessage
                };
            }
        }));

        // Execute the validated tools
        const toolResults = await Promise.all(validationResults.map(async (validation, index) => {
            const { toolCall, valid, tool, error } = validation;
            try {
                log.info(`========== TOOL CALL ${index + 1} OF ${response.tool_calls?.length || 0} ==========`);
                log.info(`Tool call ${index + 1} received - Name: ${toolCall.function.name}, ID: ${toolCall.id || 'unknown'}`);

                // Log parameters
                const argsStr = typeof toolCall.function.arguments === 'string'
                    ? toolCall.function.arguments
                    : JSON.stringify(toolCall.function.arguments);
                log.info(`Tool parameters: ${argsStr}`);

                // If validation failed, generate guidance and throw the error
                if (!valid || !tool) {
                    // If we already have guidance from validation, use it, otherwise generate it
                    const toolGuidance = validation.guidance ||
                        this.generateToolGuidance(toolCall.function.name,
                            error || `Unknown validation error for tool '${toolCall.function.name}'`);

                    // Include the guidance in the error message
                    throw new Error(`${error || `Unknown validation error for tool '${toolCall.function.name}'`}\n${toolGuidance}`);
                }

                log.info(`Tool validated successfully: ${toolCall.function.name}`);

                // Parse arguments (handle both string and object formats)
                let args: Record<string, unknown>;

                // At this stage, arguments should already be processed by the provider-specific service
                // But we still need to handle different formats just in case
                if (typeof toolCall.function.arguments === 'string') {
                    log.info(`Received string arguments in tool calling stage: ${toolCall.function.arguments.substring(0, 50)}...`);
                    try {
                        // Try to parse as JSON first
                        args = JSON.parse(toolCall.function.arguments) as Record<string, unknown>;
                        log.info(`Parsed JSON arguments: ${Object.keys(args).join(', ')}`);
                    } catch (e: unknown) {
                        // If it's not valid JSON, try to check if it's a stringified object with quotes
                        const errorMessage = e instanceof Error ? e.message : String(e);
                        log.info(`Failed to parse arguments as JSON, trying alternative parsing: ${errorMessage}`);

                        // Sometimes LLMs return stringified JSON with escaped quotes or incorrect quotes
                        // Try to clean it up
                        try {
                            const cleaned = toolCall.function.arguments
                                .replace(/^['"]/g, '') // Remove surrounding quotes
                                .replace(/['"]$/g, '') // Remove surrounding quotes
                                .replace(/\\"/g, '"') // Replace escaped quotes
                                .replace(/([{,])\s*'([^']+)'\s*:/g, '$1"$2":') // Replace single quotes around property names
                                .replace(/([{,])\s*(\w+)\s*:/g, '$1"$2":'); // Add quotes around unquoted property names

                            log.info(`Cleaned argument string: ${cleaned}`);
                            args = JSON.parse(cleaned) as Record<string, unknown>;
                            log.info(`Successfully parsed cleaned arguments: ${Object.keys(args).join(', ')}`);
                        } catch (cleanError: unknown) {
                            // If all parsing fails, treat it as a text argument
                            const cleanErrorMessage = cleanError instanceof Error ? cleanError.message : String(cleanError);
                            log.info(`Failed to parse cleaned arguments: ${cleanErrorMessage}`);

                            args = { text: toolCall.function.arguments };
                            log.info(`Using text argument: ${(args.text as string).substring(0, 50)}...`);
                        }
                    }
                } else {
                    // Arguments are already an object
                    args = toolCall.function.arguments as Record<string, unknown>;
                    log.info(`Using object arguments with keys: ${Object.keys(args).join(', ')}`);
                }

                // Execute the tool
                log.info(`================ EXECUTING TOOL: ${toolCall.function.name} ================`);
                log.info(`Tool parameters: ${Object.keys(args).join(', ')}`);
                log.info(`Parameters values: ${Object.entries(args).map(([k, v]) => `${k}=${typeof v === 'string' ? v : JSON.stringify(v)}`).join(', ')}`);

                // Emit tool start event if streaming is enabled
                if (streamCallback) {
                    const toolExecutionData = {
                        action: 'start',
                        tool: {
                            name: toolCall.function.name,
                            arguments: args
                        },
                        type: 'start' as const
                    };

                    // Don't wait for this to complete, but log any errors
                    const callbackResult = streamCallback('', false, {
                        text: '',
                        done: false,
                        toolExecution: toolExecutionData
                    });
                    if (callbackResult instanceof Promise) {
                        callbackResult.catch((e: Error) => log.error(`Error sending tool execution start event: ${e.message}`));
                    }
                }

                const executionStart = Date.now();
                let result;
                try {
                    log.info(`Starting tool execution for ${toolCall.function.name}...`);
                    result = await tool.execute(args);
                    const executionTime = Date.now() - executionStart;
                    log.info(`================ TOOL EXECUTION COMPLETED in ${executionTime}ms ================`);

                    // Record this successful tool execution if there's a sessionId available
                    if (input.options?.sessionId) {
                        try {
                            await chatStorageService.recordToolExecution(
                                input.options.sessionId,
                                toolCall.function.name,
                                toolCall.id || `tool-${Date.now()}-${Math.random().toString(36).substring(2, 9)}`,
                                args,
                                result,
                                undefined // No error for successful execution
                            );
                        } catch (storageError) {
                            log.error(`Failed to record tool execution in chat storage: ${storageError}`);
                        }
                    }

                    // Emit tool completion event if streaming is enabled
                    if (streamCallback) {
                        const toolExecutionData = {
                            action: 'complete',
                            tool: {
                                name: toolCall.function.name,
                                arguments: {} as Record<string, unknown>
                            },
                            result: typeof result === 'string' ? result : result as Record<string, unknown>,
                            type: 'complete' as const
                        };

                        // Don't wait for this to complete, but log any errors
                        const callbackResult = streamCallback('', false, {
                            text: '',
                            done: false,
                            toolExecution: toolExecutionData
                        });
                        if (callbackResult instanceof Promise) {
                            callbackResult.catch((e: Error) => log.error(`Error sending tool execution complete event: ${e.message}`));
                        }
                    }
                } catch (execError: unknown) {
                    const executionTime = Date.now() - executionStart;
                    const errorMessage = execError instanceof Error ? execError.message : String(execError);
                    log.error(`================ TOOL EXECUTION FAILED in ${executionTime}ms: ${errorMessage} ================`);

                    // Generate guidance for the failed tool execution
                    const toolGuidance = this.generateToolGuidance(toolCall.function.name, errorMessage);

                    // Add the guidance to the error message for the LLM
                    const enhancedErrorMessage = `${errorMessage}\n${toolGuidance}`;

                    // Record this failed tool execution if there's a sessionId available
                    if (input.options?.sessionId) {
                        try {
                            await chatStorageService.recordToolExecution(
                                input.options.sessionId,
                                toolCall.function.name,
                                toolCall.id || `tool-${Date.now()}-${Math.random().toString(36).substring(2, 9)}`,
                                args,
                                "", // No result for failed execution
                                enhancedErrorMessage // Use enhanced error message with guidance
                            );
                        } catch (storageError) {
                            log.error(`Failed to record tool execution error in chat storage: ${storageError}`);
                        }
                    }

                    // Emit tool error event if streaming is enabled
                    if (streamCallback) {
                        const toolExecutionData = {
                            action: 'error',
                            tool: {
                                name: toolCall.function.name,
                                arguments: {} as Record<string, unknown>
                            },
                            error: enhancedErrorMessage, // Include guidance in the error message
                            type: 'error' as const
                        };

                        // Don't wait for this to complete, but log any errors
                        const callbackResult = streamCallback('', false, {
                            text: '',
                            done: false,
                            toolExecution: toolExecutionData
                        });
                        if (callbackResult instanceof Promise) {
                            callbackResult.catch((e: Error) => log.error(`Error sending tool execution error event: ${e.message}`));
                        }
                    }

                    // Modify the error to include our guidance
                    if (execError instanceof Error) {
                        execError.message = enhancedErrorMessage;
                    }
                    throw execError;
                }

                // Log execution result.
                // Guard against null/undefined tool results: `typeof null === 'object'` is false
                // only for the string branch, and Object.keys(null) throws a TypeError.
                const resultSummary = typeof result === 'string'
                    ? `${result.substring(0, 100)}...`
                    : result && typeof result === 'object'
                        ? `Object with keys: ${Object.keys(result).join(', ')}`
                        : `Value: ${String(result)}`;
                const executionTime = Date.now() - executionStart;
                log.info(`Tool execution completed in ${executionTime}ms - Result: ${resultSummary}`);

                // Return result with tool call ID
                return {
                    toolCallId: toolCall.id,
                    name: toolCall.function.name,
                    result
                };
            } catch (error: unknown) {
                const errorMessage = error instanceof Error ? error.message : String(error);
                log.error(`Error executing tool ${toolCall.function.name}: ${errorMessage}`);

                // Emit tool error event if not already handled in the try/catch above
                // and if streaming is enabled
                // Need to check if error is an object with a name property of type string
                const isExecutionError = typeof error === 'object' && error !== null &&
                    'name' in error && (error as { name: unknown }).name === "ExecutionError";

                if (streamCallback && !isExecutionError) {
                    const toolExecutionData = {
                        action: 'error',
                        tool: {
                            name: toolCall.function.name,
                            arguments: {} as Record<string, unknown>
                        },
                        error: errorMessage,
                        type: 'error' as const
                    };

                    // Don't wait for this to complete, but log any errors
                    const callbackResult = streamCallback('', false, {
                        text: '',
                        done: false,
                        toolExecution: toolExecutionData
                    });
                    if (callbackResult instanceof Promise) {
                        callbackResult.catch((e: Error) => log.error(`Error sending tool execution error event: ${e.message}`));
                    }
                }

                // Return error message as result
                return {
                    toolCallId: toolCall.id,
                    name: toolCall.function.name,
                    result: `Error: ${errorMessage}`
                };
            }
        }));

        const totalExecutionTime = Date.now() - executionStartTime;
        log.info(`========== TOOL EXECUTION COMPLETE ==========`);
        log.info(`Completed execution of ${toolResults.length} tools in ${totalExecutionTime}ms`);

        // Add each tool result to the messages array
        const toolResultMessages: Message[] = [];
        // Names of tools whose (non-error) results were empty. Collected here, from the
        // RAW results, so the directive message below uses the same emptiness decision;
        // re-checking the enhanced message content later would disagree, because the
        // appended NOTE changes what isEmptyToolResult() sees.
        const emptyToolNames: string[] = [];
        let hasEmptyResults = false;

        for (const result of toolResults) {
            const { toolCallId, name, result: toolResult } = result;

            // Format result for message
            const resultContent = typeof toolResult === 'string'
                ? toolResult
                : JSON.stringify(toolResult, null, 2);

            // Check if result is empty or unhelpful
            const isEmptyResult = this.isEmptyToolResult(toolResult, name);
            if (isEmptyResult && !resultContent.startsWith('Error:')) {
                hasEmptyResults = true;
                emptyToolNames.push(name);
                log.info(`Empty result detected for tool ${name}. Will add suggestion to try different parameters.`);
            }

            // Add enhancement for empty results
            let enhancedContent = resultContent;
            if (isEmptyResult && !resultContent.startsWith('Error:')) {
                enhancedContent = `${resultContent}\n\nNOTE: This tool returned no useful results with the provided parameters. Consider trying again with different parameters such as broader search terms, different filters, or alternative approaches.`;
            }

            // Add a new message for the tool result
            const toolMessage: Message = {
                role: 'tool',
                content: enhancedContent,
                name: name,
                tool_call_id: toolCallId
            };

            // Log detailed info about each tool result
            log.info(`-------- Tool Result for ${name} (ID: ${toolCallId}) --------`);
            log.info(`Result type: ${typeof toolResult}`);
            log.info(`Result preview: ${resultContent.substring(0, 150)}${resultContent.length > 150 ? '...' : ''}`);
            log.info(`Tool result status: ${resultContent.startsWith('Error:') ? 'ERROR' : isEmptyResult ? 'EMPTY' : 'SUCCESS'}`);

            updatedMessages.push(toolMessage);
            toolResultMessages.push(toolMessage);
        }

        // Log the decision about follow-up
        log.info(`========== FOLLOW-UP DECISION ==========`);
        const hasToolResults = toolResultMessages.length > 0;
        const hasErrors = toolResultMessages.some(msg => msg.content.startsWith('Error:'));
        const needsFollowUp = hasToolResults;

        log.info(`Follow-up needed: ${needsFollowUp}`);
        log.info(`Reasoning: ${hasToolResults ? 'Has tool results to process' : 'No tool results'} ${hasErrors ? ', contains errors' : ''} ${hasEmptyResults ? ', contains empty results' : ''}`);

        // Add a system message with hints for empty results
        if (hasEmptyResults && needsFollowUp) {
            log.info('Adding system message requiring the LLM to run additional tools with different parameters');

            // Build a more directive message based on which tools were empty
            // (emptyToolNames was gathered from the raw results in the loop above)
            let directiveMessage = `YOU MUST NOT GIVE UP AFTER A SINGLE EMPTY SEARCH RESULT. `;

            if (emptyToolNames.includes('search_notes') || emptyToolNames.includes('keyword_search')) {
                directiveMessage += `IMMEDIATELY RUN ANOTHER SEARCH TOOL with broader search terms, alternative keywords, or related concepts. `;
                directiveMessage += `Try synonyms, more general terms, or related topics. `;
            }

            if (emptyToolNames.includes('keyword_search')) {
                directiveMessage += `IMMEDIATELY TRY SEARCH_NOTES INSTEAD as it might find matches where keyword search failed. `;
            }

            directiveMessage += `DO NOT ask the user what to do next or if they want general information. CONTINUE SEARCHING with different parameters.`;

            updatedMessages.push({
                role: 'system',
                content: directiveMessage
            });
        }

        log.info(`Total messages to return to pipeline: ${updatedMessages.length}`);
        log.info(`Last 3 messages in conversation:`);
        const lastMessages = updatedMessages.slice(-3);
        lastMessages.forEach((msg, idx) => {
            const position = updatedMessages.length - lastMessages.length + idx;
            // `?? 0` keeps this safe (and strict-null clean) when content is undefined
            log.info(`Message ${position} (${msg.role}): ${msg.content?.substring(0, 100)}${(msg.content?.length ?? 0) > 100 ? '...' : ''}`);
        });

        return {
            response,
            messages: updatedMessages,
            needsFollowUp
        };
    }

    /**
     * Validate a tool before execution
     * @param tool The tool to validate
     * @param toolName The name of the tool
     * @returns true when the tool exists and exposes a callable execute method
     */
    private async validateToolBeforeExecution(tool: ToolInterface, toolName: string): Promise<boolean> {
        try {
            if (!tool) {
                log.error(`Tool '${toolName}' not found or failed validation`);
                return false;
            }

            // Validate execute method
            if (!tool.execute || typeof tool.execute !== 'function') {
                log.error(`Tool '${toolName}' is missing execute method`);
                return false;
            }

            // search_notes tool now uses context handler instead of vector search
            if (toolName === 'search_notes') {
                log.info(`Tool '${toolName}' validated - uses context handler instead of vector search`);
            }

            // Add additional tool-specific validations here

            return true;
        } catch (error: unknown) {
            const errorMessage = error instanceof Error ? error.message : String(error);
            log.error(`Error validating tool before execution: ${errorMessage}`);
            return false;
        }
    }

    /**
     * Generate guidance for the LLM when a tool fails or is not found
     * @param toolName The name of the tool that failed
     * @param errorMessage The error message from the failed tool
     * @returns A guidance message for the LLM with suggestions of what to try next
     */
    private generateToolGuidance(toolName: string, errorMessage: string): string {
        // Get all available tool names for recommendations
        const availableTools = toolRegistry.getAllTools();
        const availableToolNames = availableTools
            .map(t => {
                if (t && typeof t === 'object' && 'definition' in t &&
                    t.definition && typeof t.definition === 'object' &&
                    'function' in t.definition && t.definition.function &&
                    typeof t.definition.function === 'object' &&
                    'name' in t.definition.function &&
                    typeof t.definition.function.name === 'string') {
                    return t.definition.function.name;
                }
                return '';
            })
            .filter(name => name !== '');

        // Create specific guidance based on the error and tool
        let guidance = `TOOL GUIDANCE: The tool '${toolName}' failed with error: ${errorMessage}.\n`;

        // Add suggestions based on the specific tool and error
        if (toolName === 'attribute_search' && errorMessage.includes('Invalid attribute type')) {
            guidance += "CRITICAL REQUIREMENT: The 'attribute_search' tool requires 'attributeType' parameter that must be EXACTLY 'label' or 'relation' (lowercase, no other values).\n";
            guidance += "CORRECT EXAMPLE: { \"attributeType\": \"label\", \"attributeName\": \"important\", \"attributeValue\": \"yes\" }\n";
            guidance += "INCORRECT EXAMPLE: { \"attributeType\": \"Label\", ... } - Case matters! Must be lowercase.\n";
        }
        else if (errorMessage.includes('Tool not found')) {
            // Provide guidance on available search tools if a tool wasn't found
            const searchTools = availableToolNames.filter(name => name.includes('search'));
            guidance += `AVAILABLE SEARCH TOOLS: ${searchTools.join(', ')}\n`;
            guidance += "TRY SEARCH NOTES: For semantic matches, use 'search_notes' with a query parameter.\n";
            guidance += "EXAMPLE: { \"query\": \"your search terms here\" }\n";
        }
        else if (errorMessage.includes('missing required parameter')) {
            // Provide parameter guidance based on the tool name
            if (toolName === 'search_notes') {
                guidance += "REQUIRED PARAMETERS: The 'search_notes' tool requires a 'query' parameter.\n";
                guidance += "EXAMPLE: { \"query\": \"your search terms here\" }\n";
            } else if (toolName === 'keyword_search') {
                guidance += "REQUIRED PARAMETERS: The 'keyword_search' tool requires a 'query' parameter.\n";
                guidance += "EXAMPLE: { \"query\": \"your search terms here\" }\n";
            }
        }

        // Add a general suggestion to try search_notes as a fallback
        if (!toolName.includes('search_notes')) {
            guidance += "RECOMMENDATION: If specific searches fail, try the 'search_notes' tool which performs semantic searches.\n";
        }

        return guidance;
    }

    /**
     * Determines if a tool result is effectively empty or unhelpful
     * @param result The result from the tool execution
     * @param toolName The name of the tool that was executed
     * @returns true if the result is considered empty or unhelpful
     */
    private isEmptyToolResult(result: unknown, toolName: string): boolean {
        // Handle string results
        if (typeof result === 'string') {
            const trimmed = result.trim();
            if (trimmed === '' || trimmed === '[]' || trimmed === '{}') {
                return true;
            }

            // Tool-specific empty results (for string responses)
            if (toolName === 'search_notes' &&
                (trimmed === 'No matching notes found.' ||
                    trimmed.includes('No results found') ||
                    trimmed.includes('No matches found') ||
                    trimmed.includes('No notes found'))) {
                // This is a valid result (empty, but valid), don't mark as empty so LLM can see feedback
                return false;
            }

            if (toolName === 'keyword_search' &&
                (trimmed.includes('No matches found') ||
                    trimmed.includes('No results for'))) {
                return true;
            }
        }
        // Handle object/array results
        else if (result !== null && typeof result === 'object') {
            // Check if it's an empty array
            if (Array.isArray(result) && result.length === 0) {
                return true;
            }

            // Check if it's an object with no meaningful properties
            // or with properties indicating empty results
            if (!Array.isArray(result)) {
                if (Object.keys(result).length === 0) {
                    return true;
                }

                // Tool-specific object empty checks
                const resultObj = result as Record<string, unknown>;
                if (toolName === 'search_notes' &&
                    'results' in resultObj &&
                    Array.isArray(resultObj.results) &&
                    resultObj.results.length === 0) {
                    return true;
                }
            }
        }

        return false;
    }
}

View File

@ -4,6 +4,8 @@ import options from '../../options.js';
import * as providers from './providers.js';
import type { ChatCompletionOptions, Message } from '../ai_interface.js';
import { Ollama } from 'ollama';
import toolFilterService from '../tool_filter_service.js';
import pipelineConfigService from '../config/pipeline_config.js';
// Mock dependencies
vi.mock('../../options.js', () => ({
@ -63,6 +65,25 @@ vi.mock('./stream_handler.js', () => ({
extractStreamStats: vi.fn()
}));
vi.mock('../tool_filter_service.js', () => ({
default: {
filterToolsForProvider: vi.fn((config, tools) => tools), // Pass through by default
getFilterStats: vi.fn(() => ({
reductionPercent: 0,
estimatedTokenSavings: 0
}))
}
}));
vi.mock('../config/pipeline_config.js', () => ({
default: {
getConfig: vi.fn(() => ({
ollamaContextWindow: 8192,
enableQueryBasedFiltering: true
}))
}
}));
vi.mock('ollama', () => {
const mockStream = {
[Symbol.asyncIterator]: async function* () {
@ -316,12 +337,14 @@ describe('OllamaService', () => {
vi.mocked(options.getOption).mockReturnValue('http://localhost:11434');
const mockTools = [{
name: 'test_tool',
description: 'Test tool',
parameters: {
type: 'object',
properties: {},
required: []
function: {
name: 'test_tool',
description: 'Test tool',
parameters: {
type: 'object',
properties: {},
required: []
}
}
}];
@ -334,10 +357,23 @@ describe('OllamaService', () => {
};
vi.mocked(providers.getOllamaOptions).mockResolvedValueOnce(mockOptions);
// Mock tool filter to return the same tools
vi.mocked(toolFilterService.filterToolsForProvider).mockReturnValueOnce(mockTools);
const chatSpy = vi.spyOn(mockOllamaInstance, 'chat');
await service.generateChatCompletion(messages);
// Verify that tool filtering was called with correct parameters
expect(toolFilterService.filterToolsForProvider).toHaveBeenCalledWith(
expect.objectContaining({
provider: 'ollama',
contextWindow: 8192
}),
mockTools
);
// Verify the filtered tools were passed to Ollama
const calledParams = chatSpy.mock.calls[0][0] as any;
expect(calledParams.tools).toEqual(mockTools);
});

View File

@ -1,258 +0,0 @@
/**
* Attribute Manager Tool
*
* This tool allows the LLM to add, remove, or modify note attributes in Trilium.
*/
import type { Tool, ToolHandler } from './tool_interfaces.js';
import log from '../../log.js';
import becca from '../../../becca/becca.js';
import attributes from '../../attributes.js';
/**
 * Narrow an unknown value to an Error-like object: either a genuine Error
 * instance, or any non-null object that carries a `message` property.
 */
function isError(error: unknown): error is Error {
    if (error instanceof Error) {
        return true;
    }
    return typeof error === 'object' && error !== null && 'message' in error;
}
/**
 * Definition of the attribute manager tool
 *
 * JSON-schema style declaration advertised to the LLM; the runtime behavior
 * lives in AttributeManagerTool.execute(). Only `noteId` and `action` are
 * required — `attributeName`/`attributeValue` are needed per-action (see
 * the checks in execute()).
 */
export const attributeManagerToolDefinition: Tool = {
    type: 'function',
    function: {
        name: 'manage_attributes',
        description: 'Add, remove, or modify attributes (labels/relations) on a note',
        parameters: {
            type: 'object',
            properties: {
                noteId: {
                    type: 'string',
                    description: 'System ID of the note to manage attributes for (not the title). This is a unique identifier like "abc123def456".'
                },
                action: {
                    type: 'string',
                    description: 'Action to perform on the attribute',
                    enum: ['add', 'remove', 'update', 'list']
                },
                attributeName: {
                    type: 'string',
                    description: 'Name of the attribute (e.g., "#tag" for a label, or "relation" for a relation)'
                },
                attributeValue: {
                    type: 'string',
                    description: 'Value of the attribute (for add/update actions). Not needed for label-type attributes.'
                }
            },
            required: ['noteId', 'action']
        }
    }
};
/**
 * Attribute manager tool implementation
 *
 * Handles four actions on a note's owned attributes: 'list', 'add', 'remove',
 * and 'update'. Error conditions are reported as return values (strings
 * starting with "Error:" or objects with success:false), not thrown — the
 * outer catch is a last-resort guard.
 */
export class AttributeManagerTool implements ToolHandler {
    public definition: Tool = attributeManagerToolDefinition;

    /**
     * Execute the attribute manager tool
     *
     * @param args.noteId note's system ID (looked up directly in becca.notes)
     * @param args.action one of 'list' | 'add' | 'remove' | 'update'
     * @param args.attributeName required for every action except 'list'
     * @param args.attributeValue required for 'update'; optional filter for 'remove'
     * @returns a result object on success/soft-failure, or an "Error: ..." string
     */
    public async execute(args: { noteId: string, action: string, attributeName?: string, attributeValue?: string }): Promise<string | object> {
        try {
            const { noteId, action, attributeName, attributeValue } = args;

            log.info(`Executing manage_attributes tool - NoteID: "${noteId}", Action: ${action}, AttributeName: ${attributeName || 'not specified'}`);

            // Get the note from becca
            const note = becca.notes[noteId];
            if (!note) {
                log.info(`Note with ID ${noteId} not found - returning error`);
                return `Error: Note with ID ${noteId} not found`;
            }

            log.info(`Found note: "${note.title}" (Type: ${note.type})`);

            // List all existing attributes (owned only — inherited attributes are not included)
            if (action === 'list') {
                const noteAttributes = note.getOwnedAttributes();
                log.info(`Listing ${noteAttributes.length} attributes for note "${note.title}"`);

                // Project each attribute down to the fields the LLM needs
                const formattedAttributes = noteAttributes.map(attr => ({
                    name: attr.name,
                    value: attr.value,
                    type: attr.type
                }));

                return {
                    success: true,
                    noteId: note.noteId,
                    title: note.title,
                    attributeCount: noteAttributes.length,
                    attributes: formattedAttributes
                };
            }

            // For other actions, attribute name is required
            if (!attributeName) {
                return 'Error: attributeName is required for add, remove, and update actions';
            }

            // Perform the requested action
            if (action === 'add') {
                // Add a new attribute
                try {
                    const startTime = Date.now();

                    // For label-type attributes (starting with #), no value is needed
                    const isLabel = attributeName.startsWith('#');
                    const value = isLabel ? '' : (attributeValue || '');

                    // Check if attribute already exists (exact name+value match)
                    const existingAttrs = note.getOwnedAttributes()
                        .filter(attr => attr.name === attributeName && attr.value === value);

                    if (existingAttrs.length > 0) {
                        log.info(`Attribute ${attributeName}=${value} already exists on note "${note.title}"`);
                        return {
                            success: false,
                            message: `Attribute ${attributeName}=${value || ''} already exists on note "${note.title}"`
                        };
                    }

                    // Create the attribute
                    // NOTE(review): this always goes through attributes.createLabel, even though
                    // the tool description mentions relations — confirm whether relation
                    // creation needs a different API call.
                    await attributes.createLabel(noteId, attributeName, value);
                    const duration = Date.now() - startTime;

                    log.info(`Added attribute ${attributeName}=${value || ''} in ${duration}ms`);
                    return {
                        success: true,
                        noteId: note.noteId,
                        title: note.title,
                        action: 'add',
                        attributeName: attributeName,
                        attributeValue: value,
                        message: `Added attribute ${attributeName}=${value || ''} to note "${note.title}"`
                    };
                } catch (error: unknown) {
                    const errorMessage = isError(error) ? error.message : String(error);
                    log.error(`Error adding attribute: ${errorMessage}`);
                    return `Error: ${errorMessage}`;
                }
            } else if (action === 'remove') {
                // Remove an attribute
                try {
                    const startTime = Date.now();

                    // Find the attribute to remove; when attributeValue is omitted,
                    // every attribute with a matching name is removed
                    const attributesToRemove = note.getOwnedAttributes()
                        .filter(attr => attr.name === attributeName &&
                            (attributeValue === undefined || attr.value === attributeValue));

                    if (attributesToRemove.length === 0) {
                        log.info(`Attribute ${attributeName} not found on note "${note.title}"`);
                        return {
                            success: false,
                            message: `Attribute ${attributeName} not found on note "${note.title}"`
                        };
                    }

                    // Remove all matching attributes
                    for (const attr of attributesToRemove) {
                        // Delete attribute by recreating it with isDeleted flag
                        const attrToDelete = {
                            attributeId: attr.attributeId,
                            noteId: attr.noteId,
                            type: attr.type,
                            name: attr.name,
                            value: attr.value,
                            isDeleted: true,
                            position: attr.position,
                            utcDateModified: new Date().toISOString()
                        };
                        await attributes.createAttribute(attrToDelete);
                    }

                    const duration = Date.now() - startTime;
                    log.info(`Removed ${attributesToRemove.length} attribute(s) in ${duration}ms`);
                    return {
                        success: true,
                        noteId: note.noteId,
                        title: note.title,
                        action: 'remove',
                        attributeName: attributeName,
                        attributesRemoved: attributesToRemove.length,
                        message: `Removed ${attributesToRemove.length} attribute(s) from note "${note.title}"`
                    };
                } catch (error: unknown) {
                    const errorMessage = isError(error) ? error.message : String(error);
                    log.error(`Error removing attribute: ${errorMessage}`);
                    return `Error: ${errorMessage}`;
                }
            } else if (action === 'update') {
                // Update an attribute
                try {
                    const startTime = Date.now();

                    if (attributeValue === undefined) {
                        return 'Error: attributeValue is required for update action';
                    }

                    // Find the attribute to update (matched by name only; all matches updated)
                    const attributesToUpdate = note.getOwnedAttributes()
                        .filter(attr => attr.name === attributeName);

                    if (attributesToUpdate.length === 0) {
                        log.info(`Attribute ${attributeName} not found on note "${note.title}"`);
                        return {
                            success: false,
                            message: `Attribute ${attributeName} not found on note "${note.title}"`
                        };
                    }

                    // Update all matching attributes
                    for (const attr of attributesToUpdate) {
                        // Update by recreating with the same ID but new value
                        const attrToUpdate = {
                            attributeId: attr.attributeId,
                            noteId: attr.noteId,
                            type: attr.type,
                            name: attr.name,
                            value: attributeValue,
                            isDeleted: false,
                            position: attr.position,
                            utcDateModified: new Date().toISOString()
                        };
                        await attributes.createAttribute(attrToUpdate);
                    }

                    const duration = Date.now() - startTime;
                    log.info(`Updated ${attributesToUpdate.length} attribute(s) in ${duration}ms`);
                    return {
                        success: true,
                        noteId: note.noteId,
                        title: note.title,
                        action: 'update',
                        attributeName: attributeName,
                        attributeValue: attributeValue,
                        attributesUpdated: attributesToUpdate.length,
                        message: `Updated ${attributesToUpdate.length} attribute(s) on note "${note.title}"`
                    };
                } catch (error: unknown) {
                    const errorMessage = isError(error) ? error.message : String(error);
                    log.error(`Error updating attribute: ${errorMessage}`);
                    return `Error: ${errorMessage}`;
                }
            } else {
                return `Error: Unsupported action "${action}". Supported actions are: add, remove, update, list`;
            }
        } catch (error: unknown) {
            // Last-resort guard: report unexpected failures as an error string
            const errorMessage = isError(error) ? error.message : String(error);
            log.error(`Error executing manage_attributes tool: ${errorMessage}`);
            return `Error: ${errorMessage}`;
        }
    }
}

View File

@ -1,157 +0,0 @@
/**
* Attribute Search Tool
*
* This tool allows the LLM to search for notes based specifically on attributes.
* It's specialized for finding notes with specific labels or relations.
*/
import type { Tool, ToolHandler } from './tool_interfaces.js';
import log from '../../log.js';
import attributes from '../../attributes.js';
import searchService from '../../search/services/search.js';
import attributeFormatter from '../../attribute_formatter.js';
import type BNote from '../../../becca/entities/bnote.js';
/**
 * Schema for the `attribute_search` function as advertised to the LLM.
 *
 * Finds notes by their metadata (a specific label or relation) rather than
 * by body text. The enum on `attributeType` keeps the model from inventing
 * other attribute kinds.
 */
export const attributeSearchToolDefinition: Tool = {
    type: 'function',
    function: {
        name: 'attribute_search',
        description: 'Search for notes with specific attributes (labels or relations). Use this when you need to find notes based on their metadata rather than content. IMPORTANT: attributeType must be exactly "label" or "relation" (lowercase).',
        parameters: {
            type: 'object',
            properties: {
                attributeType: {
                    type: 'string',
                    description: 'MUST be exactly "label" or "relation" (lowercase, no other values are valid)',
                    enum: ['label', 'relation']
                },
                attributeName: {
                    type: 'string',
                    description: 'Name of the attribute to search for (e.g., "important", "todo", "related-to")'
                },
                attributeValue: {
                    type: 'string',
                    description: 'Optional value of the attribute. If not provided, will find all notes with the given attribute name.'
                },
                maxResults: {
                    type: 'number',
                    description: 'Maximum number of results to return (default: 20)'
                }
            },
            required: ['attributeType', 'attributeName']
        }
    }
};
/**
 * Attribute search tool implementation.
 *
 * Looks notes up by a specific label or relation and returns a compact
 * summary (preview text plus the matching attributes) for each hit.
 */
export class AttributeSearchTool implements ToolHandler {
    public definition: Tool = attributeSearchToolDefinition;

    /**
     * Execute the attribute search tool.
     *
     * @param args.attributeType  exactly "label" or "relation" (lowercase)
     * @param args.attributeName  attribute name to look up
     * @param args.attributeValue optional value filter; any value matches when omitted
     * @param args.maxResults     cap on returned results (default 20)
     * @returns a result object describing the matches, or an error string
     */
    public async execute(args: { attributeType: string, attributeName: string, attributeValue?: string, maxResults?: number }): Promise<string | object> {
        try {
            const { attributeType, attributeName, attributeValue, maxResults = 20 } = args;

            log.info(`Executing attribute_search tool - Type: "${attributeType}", Name: "${attributeName}", Value: "${attributeValue || 'any'}", MaxResults: ${maxResults}`);

            // Guard: only the two lowercase attribute kinds are valid.
            const isLabel = attributeType === 'label';
            if (!isLabel && attributeType !== 'relation') {
                return `Error: Invalid attribute type. Must be exactly "label" or "relation" (lowercase). You provided: "${attributeType}".`;
            }

            log.info(`Searching for notes with ${attributeType}: ${attributeName}${attributeValue ? ' = ' + attributeValue : ''}`);
            const startedAt = Date.now();

            let matches: BNote[];
            if (isLabel) {
                // Labels have a dedicated lookup helper.
                matches = attributes.getNotesWithLabel(attributeName, attributeValue);
            } else {
                // Relations go through the generic search service with a formatted query.
                const query = attributeFormatter.formatAttrForSearch({
                    type: "relation",
                    name: attributeName,
                    value: attributeValue
                }, attributeValue !== undefined);

                matches = searchService.searchNotes(query, {
                    includeArchivedNotes: true,
                    ignoreHoistedNote: true
                });
            }

            const topMatches = matches.slice(0, maxResults);
            const elapsedMs = Date.now() - startedAt;

            log.info(`Attribute search completed in ${elapsedMs}ms, found ${matches.length} matching notes, returning ${topMatches.length}`);

            if (topMatches.length === 0) {
                log.info(`No notes found with ${attributeType} "${attributeName}"${attributeValue ? ' = ' + attributeValue : ''}`);
            } else {
                // Log the first few hits for traceability.
                for (const [index, note] of topMatches.slice(0, 3).entries()) {
                    log.info(`Result ${index + 1}: "${note.title}"`);
                }
            }

            return {
                count: topMatches.length,
                totalFound: matches.length,
                attributeType,
                attributeName,
                attributeValue,
                results: topMatches.map((note: BNote) => {
                    const relevantAttributes = this.collectMatchingAttributes(note, attributeType, attributeName);
                    const preview = this.buildPreview(note);
                    return {
                        noteId: note.noteId,
                        title: note.title,
                        preview: preview,
                        relevantAttributes: relevantAttributes,
                        type: note.type,
                        dateCreated: note.dateCreated,
                        dateModified: note.dateModified
                    };
                })
            };
        } catch (error: unknown) {
            const errorMessage = error instanceof Error ? error.message : String(error);
            log.error(`Error executing attribute_search tool: ${errorMessage}`);
            return `Error: ${errorMessage}`;
        }
    }

    // Owned attributes of the searched type/name, reduced to plain objects.
    private collectMatchingAttributes(note: BNote, attributeType: string, attributeName: string) {
        return note.getOwnedAttributes()
            .filter(attr => attr.type === attributeType && attr.name === attributeName)
            .map(attr => ({
                type: attr.type,
                name: attr.name,
                value: attr.value
            }));
    }

    // Short plain-text preview of the note body (capped at 150 chars).
    private buildPreview(note: BNote): string {
        try {
            const content = note.getContent();
            if (typeof content === 'string') {
                return content.length > 150 ? content.substring(0, 150) + '...' : content;
            }
            if (Buffer.isBuffer(content)) {
                return '[Binary content]';
            }
            const text = String(content);
            return text.substring(0, 150) + (text.length > 150 ? '...' : '');
        } catch (_) {
            return '[Content not available]';
        }
    }
}

View File

@ -1,560 +0,0 @@
/**
* Content Extraction Tool
*
* This tool allows the LLM to extract structured information from notes.
*/
import type { Tool, ToolHandler } from './tool_interfaces.js';
import log from '../../log.js';
import becca from '../../../becca/becca.js';
// Shapes produced by ContentExtractionTool's HTML parsers.

/** A code snippet pulled from <pre>/<code> markup. */
interface CodeBlock {
    code: string;
    language?: string; // only set when a class="language-…" hint is present in the markup
}

/** A heading pulled from <h1>..<h6> markup. */
interface Heading {
    text: string;
    level: number; // 1 for H1, 2 for H2, etc.
}

/** An unordered (<ul>) or ordered (<ol>) list, items as plain text. */
interface List {
    type: "unordered" | "ordered";
    items: string[];
}

/** A table: header cells plus row cells, all reduced to plain text. */
interface Table {
    headers: string[];
    rows: string[][];
}
/**
 * Schema for the `extract_content` tool as advertised to the LLM.
 *
 * Lets the model pull structured fragments (lists, tables, headings, code
 * blocks) out of a note, optionally filtered by a text query.
 */
export const contentExtractionToolDefinition: Tool = {
    type: 'function',
    function: {
        name: 'extract_content',
        description: 'Extract structured information from a note\'s content, such as lists, tables, or specific sections',
        parameters: {
            type: 'object',
            properties: {
                noteId: {
                    type: 'string',
                    description: 'ID of the note to extract content from'
                },
                extractionType: {
                    type: 'string',
                    description: 'Type of content to extract',
                    enum: ['lists', 'tables', 'headings', 'codeBlocks', 'all']
                },
                format: {
                    type: 'string',
                    description: 'Format to return the extracted content in',
                    enum: ['json', 'markdown', 'text']
                },
                query: {
                    type: 'string',
                    description: 'Optional search query to filter extracted content (e.g., "tasks related to finance")'
                }
            },
            required: ['noteId', 'extractionType']
        }
    }
};
/**
 * Content extraction tool implementation.
 *
 * Parses a note's HTML body with regular expressions and returns the
 * requested structures (lists, tables, headings, code blocks) as JSON,
 * Markdown, or plain text.
 */
export class ContentExtractionTool implements ToolHandler {
    public definition: Tool = contentExtractionToolDefinition;

    /**
     * Execute the content extraction tool.
     *
     * @param args.noteId         ID of the note to read
     * @param args.extractionType which structures to extract ('all' for every kind)
     * @param args.format         output format; defaults to 'json'
     * @param args.query          optional case-insensitive text filter applied to the results
     * @returns a result object (json format) or a formatted string; an error
     *          string when the note cannot be found or extraction fails
     */
    public async execute(args: {
        noteId: string,
        extractionType: 'lists' | 'tables' | 'headings' | 'codeBlocks' | 'all',
        format?: 'json' | 'markdown' | 'text',
        query?: string
    }): Promise<string | object> {
        try {
            const { noteId, extractionType, format = 'json', query } = args;
            log.info(`Executing extract_content tool - NoteID: "${noteId}", Type: ${extractionType}, Format: ${format}`);

            // Resolve the note from the in-memory cache.
            const note = becca.notes[noteId];
            if (!note) {
                log.info(`Note with ID ${noteId} not found - returning error`);
                return `Error: Note with ID ${noteId} not found`;
            }
            log.info(`Found note: "${note.title}" (Type: ${note.type})`);

            const content = await note.getContent();
            if (!content) {
                return {
                    success: false,
                    message: 'Note content is empty'
                };
            }
            log.info(`Retrieved note content, length: ${content.length} chars`);

            // Normalize to a string regardless of whether getContent() returned a Buffer.
            const html = typeof content === 'string' ? content : content.toString();

            // Only the requested structure kinds are populated.
            const extractedContent: {
                lists?: List[],
                tables?: Table[],
                headings?: Heading[],
                codeBlocks?: CodeBlock[]
            } = {};

            if (extractionType === 'lists' || extractionType === 'all') {
                extractedContent.lists = this.extractLists(html);
                log.info(`Extracted ${extractedContent.lists.length} lists`);
            }
            if (extractionType === 'tables' || extractionType === 'all') {
                extractedContent.tables = this.extractTables(html);
                log.info(`Extracted ${extractedContent.tables.length} tables`);
            }
            if (extractionType === 'headings' || extractionType === 'all') {
                extractedContent.headings = this.extractHeadings(html);
                log.info(`Extracted ${extractedContent.headings.length} headings`);
            }
            if (extractionType === 'codeBlocks' || extractionType === 'all') {
                extractedContent.codeBlocks = this.extractCodeBlocks(html);
                log.info(`Extracted ${extractedContent.codeBlocks.length} code blocks`);
            }

            // Drop extracted entries that do not mention the query string.
            if (query) {
                log.info(`Filtering extracted content with query: "${query}"`);
                this.filterContentByQuery(extractedContent, query);
            }

            if (format === 'markdown') {
                return this.formatAsMarkdown(extractedContent, extractionType);
            } else if (format === 'text') {
                return this.formatAsText(extractedContent, extractionType);
            } else {
                // Default to JSON format.
                return {
                    success: true,
                    noteId: note.noteId,
                    title: note.title,
                    extractionType,
                    content: extractedContent
                };
            }
        } catch (error: unknown) {
            // `unknown` + narrowing instead of `any` for type-safe error handling.
            const message = error instanceof Error ? error.message : String(error);
            log.error(`Error executing extract_content tool: ${message}`);
            return `Error: ${message}`;
        }
    }

    /**
     * Extract <ul>/<ol> lists from HTML content.
     */
    private extractLists(content: string): List[] {
        const lists: List[] = [];

        // Unordered lists.
        const ulRegex = /<ul[^>]*>([\s\S]*?)<\/ul>/gi;
        let ulMatch;
        while ((ulMatch = ulRegex.exec(content)) !== null) {
            const items = this.extractListItems(ulMatch[1]);
            if (items.length > 0) {
                lists.push({ type: 'unordered', items });
            }
        }

        // Ordered lists.
        const olRegex = /<ol[^>]*>([\s\S]*?)<\/ol>/gi;
        let olMatch;
        while ((olMatch = olRegex.exec(content)) !== null) {
            const items = this.extractListItems(olMatch[1]);
            if (items.length > 0) {
                lists.push({ type: 'ordered', items });
            }
        }

        return lists;
    }

    /**
     * Extract the non-empty <li> texts from the inside of a list element.
     */
    private extractListItems(listContent: string): string[] {
        const items: string[] = [];
        const itemRegex = /<li[^>]*>([\s\S]*?)<\/li>/gi;
        let itemMatch;

        while ((itemMatch = itemRegex.exec(listContent)) !== null) {
            const itemText = this.stripHtml(itemMatch[1]).trim();
            if (itemText) {
                items.push(itemText);
            }
        }

        return items;
    }

    /**
     * Extract <table> structures (headers from <th>, rows from <tr>/<td>).
     */
    private extractTables(content: string): Table[] {
        const tables: Table[] = [];
        const tableRegex = /<table[^>]*>([\s\S]*?)<\/table>/gi;
        let tableMatch: RegExpExecArray | null;

        while ((tableMatch = tableRegex.exec(content)) !== null) {
            const tableContent = tableMatch[1];
            const headers: string[] = [];
            const rows: string[][] = [];

            // Header cells (anywhere in the table markup).
            const headerRegex = /<th[^>]*>([\s\S]*?)<\/th>/gi;
            let headerMatch;
            while ((headerMatch = headerRegex.exec(tableContent)) !== null) {
                headers.push(this.stripHtml(headerMatch[1]).trim());
            }

            // Data rows; a <tr> holding only <th> cells yields no <td>s and is skipped.
            const rowRegex = /<tr[^>]*>([\s\S]*?)<\/tr>/gi;
            let rowMatch;
            while ((rowMatch = rowRegex.exec(tableContent)) !== null) {
                const cells: string[] = [];
                const cellRegex = /<td[^>]*>([\s\S]*?)<\/td>/gi;
                let cellMatch;
                while ((cellMatch = cellRegex.exec(rowMatch[1])) !== null) {
                    cells.push(this.stripHtml(cellMatch[1]).trim());
                }
                if (cells.length > 0) {
                    rows.push(cells);
                }
            }

            if (headers.length > 0 || rows.length > 0) {
                tables.push({ headers, rows });
            }
        }

        return tables;
    }

    /**
     * Extract <h1>..<h6> headings with their level.
     */
    private extractHeadings(content: string): Heading[] {
        const headings: Heading[] = [];

        for (let i = 1; i <= 6; i++) {
            const headingRegex = new RegExp(`<h${i}[^>]*>([\\s\\S]*?)</h${i}>`, 'gi');
            let headingMatch;
            while ((headingMatch = headingRegex.exec(content)) !== null) {
                const headingText = this.stripHtml(headingMatch[1]).trim();
                if (headingText) {
                    headings.push({
                        level: i,
                        text: headingText
                    });
                }
            }
        }

        return headings;
    }

    /**
     * Extract code blocks from <pre>/<code> markup.
     *
     * <pre> elements are handled first (with an optional nested <code> carrying
     * a `language-…` class); standalone <code> elements are then collected from
     * the document with the <pre> regions removed.
     */
    private extractCodeBlocks(content: string): CodeBlock[] {
        const codeBlocks: CodeBlock[] = [];

        const preRegex = /<pre[^>]*>([\s\S]*?)<\/pre>/gi;
        let preMatch;
        while ((preMatch = preRegex.exec(content)) !== null) {
            const preContent = preMatch[1];
            const codeMatch = /<code[^>]*>([\s\S]*?)<\/code>/i.exec(preContent);
            if (codeMatch) {
                // Pick up the language hint from a class="language-…" attribute, if any.
                const classMatch = /class="[^"]*language-([^"\s]+)[^"]*"/i.exec(preMatch[0]);
                codeBlocks.push({
                    language: classMatch ? classMatch[1] : undefined,
                    code: this.decodeHtmlEntities(codeMatch[1]).trim()
                });
            } else {
                // A bare <pre> without a nested <code>.
                codeBlocks.push({
                    code: this.decodeHtmlEntities(preContent).trim()
                });
            }
        }

        // FIX: the previous implementation used a variable-length negative
        // lookbehind ((?<!<pre[^>]*>[\s\S]*?)<code…) which rejects EVERY
        // standalone <code> once any <pre> appears earlier in the document.
        // Stripping the <pre> regions first yields the intended behavior.
        const withoutPre = content.replace(/<pre[^>]*>[\s\S]*?<\/pre>/gi, '');
        const standaloneCodeRegex = /<code[^>]*>([\s\S]*?)<\/code>/gi;
        let standaloneCodeMatch;
        while ((standaloneCodeMatch = standaloneCodeRegex.exec(withoutPre)) !== null) {
            codeBlocks.push({
                code: this.decodeHtmlEntities(standaloneCodeMatch[1]).trim()
            });
        }

        return codeBlocks;
    }

    /**
     * In-place filter: keep only extracted entries that mention `query`
     * (case-insensitive). List items are additionally filtered individually.
     */
    private filterContentByQuery(content: any, query: string): void {
        const lowerQuery = query.toLowerCase();

        if (content.lists) {
            content.lists = content.lists.filter((list: { type: string; items: string[] }) =>
                list.items.some((item: string) => item.toLowerCase().includes(lowerQuery))
            );
            content.lists.forEach((list: { type: string; items: string[] }) => {
                list.items = list.items.filter((item: string) => item.toLowerCase().includes(lowerQuery));
            });
        }

        if (content.headings) {
            content.headings = content.headings.filter((heading: { level: number; text: string }) =>
                heading.text.toLowerCase().includes(lowerQuery)
            );
        }

        if (content.tables) {
            content.tables = content.tables.filter((table: { headers: string[]; rows: string[][] }) => {
                const headerMatch = table.headers.some((header: string) =>
                    header.toLowerCase().includes(lowerQuery)
                );
                const cellMatch = table.rows.some((row: string[]) =>
                    row.some((cell: string) => cell.toLowerCase().includes(lowerQuery))
                );
                return headerMatch || cellMatch;
            });
        }

        if (content.codeBlocks) {
            content.codeBlocks = content.codeBlocks.filter((block: { language?: string; code: string }) =>
                block.code.toLowerCase().includes(lowerQuery)
            );
        }
    }

    /**
     * Render the extracted structures as a Markdown document.
     */
    private formatAsMarkdown(content: any, extractionType: string): string {
        let markdown = '';

        if (extractionType === 'lists' || extractionType === 'all') {
            if (content.lists && content.lists.length > 0) {
                markdown += '## Lists\n\n';
                content.lists.forEach((list: any, index: number) => {
                    markdown += `### List ${index + 1} (${list.type})\n\n`;
                    list.items.forEach((item: string) => {
                        // Markdown renumbers "1." items itself, so a literal 1. suffices.
                        markdown += list.type === 'unordered' ? `- ${item}\n` : `1. ${item}\n`;
                    });
                    markdown += '\n';
                });
            }
        }

        if (extractionType === 'headings' || extractionType === 'all') {
            if (content.headings && content.headings.length > 0) {
                markdown += '## Headings\n\n';
                content.headings.forEach((heading: any) => {
                    markdown += `${'#'.repeat(heading.level)} ${heading.text}\n\n`;
                });
            }
        }

        if (extractionType === 'tables' || extractionType === 'all') {
            if (content.tables && content.tables.length > 0) {
                markdown += '## Tables\n\n';
                content.tables.forEach((table: any, index: number) => {
                    markdown += `### Table ${index + 1}\n\n`;
                    if (table.headers.length > 0) {
                        markdown += '| ' + table.headers.join(' | ') + ' |\n';
                        markdown += '| ' + table.headers.map(() => '---').join(' | ') + ' |\n';
                    }
                    table.rows.forEach((row: string[]) => {
                        markdown += '| ' + row.join(' | ') + ' |\n';
                    });
                    markdown += '\n';
                });
            }
        }

        if (extractionType === 'codeBlocks' || extractionType === 'all') {
            if (content.codeBlocks && content.codeBlocks.length > 0) {
                markdown += '## Code Blocks\n\n';
                content.codeBlocks.forEach((block: any, index: number) => {
                    markdown += `### Code Block ${index + 1}\n\n`;
                    markdown += block.language ? '```' + block.language + '\n' : '```\n';
                    markdown += block.code + '\n';
                    markdown += '```\n\n';
                });
            }
        }

        return markdown.trim();
    }

    /**
     * Render the extracted structures as plain text.
     */
    private formatAsText(content: any, extractionType: string): string {
        let text = '';

        if (extractionType === 'lists' || extractionType === 'all') {
            if (content.lists && content.lists.length > 0) {
                text += 'LISTS:\n\n';
                content.lists.forEach((list: any, index: number) => {
                    text += `List ${index + 1} (${list.type}):\n\n`;
                    list.items.forEach((item: string, itemIndex: number) => {
                        text += list.type === 'unordered' ? `${item}\n` : `${itemIndex + 1}. ${item}\n`;
                    });
                    text += '\n';
                });
            }
        }

        if (extractionType === 'headings' || extractionType === 'all') {
            if (content.headings && content.headings.length > 0) {
                text += 'HEADINGS:\n\n';
                content.headings.forEach((heading: any) => {
                    text += `${heading.text} (Level ${heading.level})\n`;
                });
                text += '\n';
            }
        }

        if (extractionType === 'tables' || extractionType === 'all') {
            if (content.tables && content.tables.length > 0) {
                text += 'TABLES:\n\n';
                content.tables.forEach((table: any, index: number) => {
                    text += `Table ${index + 1}:\n\n`;
                    if (table.headers.length > 0) {
                        text += table.headers.join(' | ') + '\n';
                        text += table.headers.map(() => '-----').join(' | ') + '\n';
                    }
                    table.rows.forEach((row: string[]) => {
                        text += row.join(' | ') + '\n';
                    });
                    text += '\n';
                });
            }
        }

        if (extractionType === 'codeBlocks' || extractionType === 'all') {
            if (content.codeBlocks && content.codeBlocks.length > 0) {
                text += 'CODE BLOCKS:\n\n';
                content.codeBlocks.forEach((block: any, index: number) => {
                    text += `Code Block ${index + 1}`;
                    if (block.language) {
                        text += ` (${block.language})`;
                    }
                    text += ':\n\n';
                    text += block.code + '\n\n';
                });
            }
        }

        return text.trim();
    }

    /**
     * Remove all HTML tags, leaving inner text.
     */
    private stripHtml(html: string): string {
        return html.replace(/<[^>]*>/g, '');
    }

    /**
     * Decode the common HTML entities produced by the editor.
     * NOTE: &amp; is decoded after &lt;/&gt; so that "&amp;lt;" stays "&lt;".
     */
    private decodeHtmlEntities(text: string): string {
        return text
            .replace(/&lt;/g, '<')
            .replace(/&gt;/g, '>')
            .replace(/&amp;/g, '&')
            .replace(/&quot;/g, '"')
            .replace(/&#39;/g, "'")
            .replace(/&nbsp;/g, ' ');
    }
}

View File

@ -1,126 +0,0 @@
/**
* Keyword Search Notes Tool
*
* This tool allows the LLM to search for notes using exact keyword matching and attribute-based filters.
* It complements the semantic search tool by providing more precise, rule-based search capabilities.
*/
import type { Tool, ToolHandler } from './tool_interfaces.js';
import log from '../../log.js';
import searchService from '../../search/services/search.js';
import becca from '../../../becca/becca.js';
/**
 * Schema for the `keyword_search_notes` tool as advertised to the LLM.
 *
 * Exposes Trilium's exact-match search syntax (keywords and attribute
 * expressions) as the precise counterpart to semantic search.
 */
export const keywordSearchToolDefinition: Tool = {
    type: 'function',
    function: {
        name: 'keyword_search_notes',
        description: 'Search for notes using exact keyword matching and attribute filters. Use this for precise searches when you need exact matches or want to filter by attributes.',
        parameters: {
            type: 'object',
            properties: {
                query: {
                    type: 'string',
                    description: 'The search query using Trilium\'s search syntax. Examples: "rings tolkien" (find notes with both words), "#book #year >= 2000" (notes with label "book" and "year" attribute >= 2000), "note.content *=* important" (notes with "important" in content)'
                },
                maxResults: {
                    type: 'number',
                    description: 'Maximum number of results to return (default: 10)'
                },
                includeArchived: {
                    type: 'boolean',
                    description: 'Whether to include archived notes in search results (default: false)'
                }
            },
            required: ['query']
        }
    }
};
/**
 * Keyword search notes tool implementation.
 *
 * Runs a Trilium search-syntax query and returns the matching notes with a
 * short content preview and their owned attributes.
 */
export class KeywordSearchTool implements ToolHandler {
    public definition: Tool = keywordSearchToolDefinition;

    /**
     * Execute the keyword search notes tool.
     *
     * @param args.query           Trilium search-syntax query
     * @param args.maxResults      cap on returned results (default 10)
     * @param args.includeArchived include archived notes (default false)
     * @returns a result object describing the matches, or an error string
     */
    public async execute(args: { query: string, maxResults?: number, includeArchived?: boolean }): Promise<string | object> {
        try {
            const { query, maxResults = 10, includeArchived = false } = args;

            log.info(`Executing keyword_search_notes tool - Query: "${query}", MaxResults: ${maxResults}, IncludeArchived: ${includeArchived}`);

            // Execute the search.
            log.info(`Performing keyword search for: "${query}"`);
            const searchStartTime = Date.now();

            const searchContext = {
                includeArchivedNotes: includeArchived,
                fuzzyAttributeSearch: false
            };

            const searchResults = searchService.searchNotes(query, searchContext);
            const limitedResults = searchResults.slice(0, maxResults);

            const searchDuration = Date.now() - searchStartTime;
            log.info(`Keyword search completed in ${searchDuration}ms, found ${searchResults.length} matching notes, returning ${limitedResults.length}`);

            if (limitedResults.length > 0) {
                // Log the first few hits for traceability.
                limitedResults.slice(0, 3).forEach((result, index) => {
                    log.info(`Result ${index + 1}: "${result.title}"`);
                });
            } else {
                log.info(`No matching notes found for query: "${query}"`);
            }

            // Format the results.
            return {
                count: limitedResults.length,
                totalFound: searchResults.length,
                results: limitedResults.map(note => {
                    // Short plain preview of the note body (150 chars max).
                    let contentPreview = '';
                    try {
                        const content = note.getContent();
                        if (typeof content === 'string') {
                            contentPreview = content.length > 150 ? content.substring(0, 150) + '...' : content;
                        } else if (Buffer.isBuffer(content)) {
                            contentPreview = '[Binary content]';
                        } else {
                            contentPreview = String(content).substring(0, 150) + (String(content).length > 150 ? '...' : '');
                        }
                    } catch (_) {
                        // Preview is best-effort; a failing getContent() must not kill the search.
                        contentPreview = '[Content not available]';
                    }

                    const attributes = note.getOwnedAttributes().map(attr => ({
                        type: attr.type,
                        name: attr.name,
                        value: attr.value
                    }));

                    return {
                        noteId: note.noteId,
                        title: note.title,
                        preview: contentPreview,
                        attributes: attributes.length > 0 ? attributes : undefined,
                        type: note.type,
                        mime: note.mime,
                        isArchived: note.isArchived
                    };
                })
            };
        } catch (error: unknown) {
            // `unknown` + narrowing instead of `any` for type-safe error handling.
            const message = error instanceof Error ? error.message : String(error);
            log.error(`Error executing keyword_search_notes tool: ${message}`);
            return `Error: ${message}`;
        }
    }
}

View File

@ -1,190 +0,0 @@
/**
* Note Creation Tool
*
* This tool allows the LLM to create new notes in Trilium.
*/
import type { Tool, ToolHandler } from './tool_interfaces.js';
import log from '../../log.js';
import becca from '../../../becca/becca.js';
import notes from '../../notes.js';
import attributes from '../../attributes.js';
import type { BNote } from '../../backend_script_entrypoint.js';
/**
 * Schema for the `create_note` tool as advertised to the LLM.
 *
 * Creates a note under a parent identified by its system ID (not title);
 * attributes use Trilium's "#label" / "~relation" prefix syntax.
 */
export const noteCreationToolDefinition: Tool = {
    type: 'function',
    function: {
        name: 'create_note',
        description: 'Create a new note in Trilium with the specified content and attributes',
        parameters: {
            type: 'object',
            properties: {
                parentNoteId: {
                    type: 'string',
                    description: 'System ID of the parent note under which to create the new note (not the title). This is a unique identifier like "abc123def456". If not specified, creates under root.'
                },
                title: {
                    type: 'string',
                    description: 'Title of the new note'
                },
                content: {
                    type: 'string',
                    description: 'Content of the new note'
                },
                type: {
                    type: 'string',
                    description: 'Type of the note (text, code, etc.)',
                    enum: ['text', 'code', 'file', 'image', 'search', 'relation-map', 'book', 'mermaid', 'canvas']
                },
                mime: {
                    type: 'string',
                    description: 'MIME type of the note (e.g., text/html, application/json). Only required for certain note types.'
                },
                attributes: {
                    type: 'array',
                    description: 'Array of attributes to set on the note (e.g., [{"name":"#tag"}, {"name":"priority", "value":"high"}])',
                    items: {
                        type: 'object',
                        properties: {
                            name: {
                                type: 'string',
                                description: 'Name of the attribute'
                            },
                            value: {
                                type: 'string',
                                description: 'Value of the attribute (optional)'
                            }
                        },
                        required: ['name']
                    }
                }
            },
            required: ['title', 'content']
        }
    }
};
/**
 * Note creation tool implementation.
 *
 * Creates a note under the given parent (root when omitted), then attaches
 * any requested labels/relations.
 */
export class NoteCreationTool implements ToolHandler {
    public definition: Tool = noteCreationToolDefinition;

    /**
     * Execute the note creation tool.
     *
     * @param args.parentNoteId optional parent note system ID; defaults to the root note
     * @param args.title        title of the new note
     * @param args.content      body of the new note
     * @param args.type         note type (default 'text')
     * @param args.mime         explicit MIME type; derived from `type` when omitted
     * @param args.attributes   labels/relations to attach after creation
     * @returns a summary object on success, or an error string
     */
    public async execute(args: {
        parentNoteId?: string,
        title: string,
        content: string,
        type?: string,
        mime?: string,
        attributes?: Array<{ name: string, value?: string }>
    }): Promise<string | object> {
        try {
            const { parentNoteId, title, content, type = 'text', mime } = args;

            log.info(`Executing create_note tool - Title: "${title}", Type: ${type}, ParentNoteId: ${parentNoteId || 'root'}`);

            // Resolve and validate the parent note (explicit ID or root).
            let parent: BNote | null = null;
            if (parentNoteId) {
                parent = becca.notes[parentNoteId];
                if (!parent) {
                    return `Error: Parent note with ID ${parentNoteId} not found. Please specify a valid parent note ID.`;
                }
            } else {
                parent = becca.getNote('root');
            }

            if (!parent) {
                return 'Error: Failed to get a valid parent note. Root note may not be accessible.';
            }

            // Derive a sensible MIME type when the caller did not supply one.
            let noteMime = mime;
            if (!noteMime) {
                switch (type) {
                    case 'text':
                        noteMime = 'text/html';
                        break;
                    case 'code':
                        noteMime = 'text/plain';
                        break;
                    case 'file':
                        noteMime = 'application/octet-stream';
                        break;
                    case 'image':
                        noteMime = 'image/png';
                        break;
                    default:
                        noteMime = 'text/html';
                }
            }

            // Create the note.
            const createStartTime = Date.now();
            const result = notes.createNewNote({
                parentNoteId: parent.noteId,
                title: title,
                content: content,
                type: type as any, // not every accepted string is in the exact NoteType union
                mime: noteMime
            });

            const noteId = result.note.noteId;
            const createDuration = Date.now() - createStartTime;

            if (!noteId) {
                return 'Error: Failed to create note. An unknown error occurred.';
            }

            log.info(`Note created successfully in ${createDuration}ms, ID: ${noteId}`);

            // Attach requested attributes, honoring Trilium's prefix syntax:
            // "#name" is a label, "~name" is a relation.
            if (args.attributes && args.attributes.length > 0) {
                log.info(`Adding ${args.attributes.length} attributes to the note`);

                for (const attr of args.attributes) {
                    if (!attr.name) continue;

                    const attrStartTime = Date.now();

                    if (attr.name.startsWith('~')) {
                        // FIX: "~" denotes a relation; this branch previously
                        // created a label, silently dropping relation semantics.
                        await attributes.createRelation(noteId, attr.name.substring(1), attr.value || '');
                    } else if (attr.name.startsWith('#')) {
                        await attributes.createLabel(noteId, attr.name.substring(1), attr.value || '');
                    } else if (attr.value && attr.value.match(/^[a-zA-Z0-9_]{12}$/)) {
                        // Bare name whose value looks like a note ID: treat as a relation.
                        await attributes.createRelation(noteId, attr.name, attr.value);
                    } else {
                        // Default to a plain label.
                        await attributes.createLabel(noteId, attr.name, attr.value || '');
                    }

                    const attrDuration = Date.now() - attrStartTime;
                    log.info(`Added attribute ${attr.name}=${attr.value || ''} in ${attrDuration}ms`);
                }
            }

            // Return the new note's information.
            const newNote = becca.notes[noteId];
            return {
                success: true,
                noteId: noteId,
                title: newNote.title,
                type: newNote.type,
                message: `Note "${title}" created successfully`
            };
        } catch (error: unknown) {
            // `unknown` + narrowing instead of `any` for type-safe error handling.
            const message = error instanceof Error ? error.message : String(error);
            log.error(`Error executing create_note tool: ${message}`);
            return `Error: ${message}`;
        }
    }
}

View File

@ -1,181 +0,0 @@
/**
* Note Summarization Tool
*
* This tool allows the LLM to generate concise summaries of longer notes.
*/
import type { Tool, ToolHandler } from './tool_interfaces.js';
import log from '../../log.js';
import becca from '../../../becca/becca.js';
import aiServiceManager from '../ai_service_manager.js';
import { SEARCH_CONSTANTS } from '../constants/search_constants.js';
/**
 * Schema for the `summarize_note` tool as advertised to the LLM.
 *
 * Produces a short summary of a note's body in one of three formats,
 * optionally focused on a caller-supplied topic.
 */
export const noteSummarizationToolDefinition: Tool = {
    type: 'function',
    function: {
        name: 'summarize_note',
        description: 'Generate a concise summary of a note\'s content',
        parameters: {
            type: 'object',
            properties: {
                noteId: {
                    type: 'string',
                    description: 'System ID of the note to summarize (not the title). This is a unique identifier like "abc123def456".'
                },
                maxLength: {
                    type: 'number',
                    description: 'Maximum length of the summary in characters (default: 500)'
                },
                format: {
                    type: 'string',
                    description: 'Format of the summary',
                    enum: ['paragraph', 'bullets', 'executive']
                },
                focus: {
                    type: 'string',
                    description: 'Optional focus for the summary (e.g., "technical details", "key findings")'
                }
            },
            required: ['noteId']
        }
    }
};
/**
 * Note summarization tool implementation.
 *
 * Strips a note's HTML, then asks the configured AI service for a summary in
 * the requested format. Content already shorter than the limit is returned
 * as-is (unless a focus is requested).
 */
export class NoteSummarizationTool implements ToolHandler {
    public definition: Tool = noteSummarizationToolDefinition;

    /**
     * Execute the note summarization tool.
     *
     * @param args.noteId    system ID of the note to summarize
     * @param args.maxLength target summary length in characters (default from SEARCH_CONSTANTS)
     * @param args.format    'paragraph' (default), 'bullets', or 'executive'
     * @param args.focus     optional topic to emphasize in the summary
     * @returns a result object with the summary, or an error string
     */
    public async execute(args: {
        noteId: string,
        maxLength?: number,
        format?: 'paragraph' | 'bullets' | 'executive',
        focus?: string
    }): Promise<string | object> {
        try {
            const { noteId, maxLength = SEARCH_CONSTANTS.LIMITS.DEFAULT_NOTE_SUMMARY_LENGTH, format = 'paragraph', focus } = args;

            log.info(`Executing summarize_note tool - NoteID: "${noteId}", MaxLength: ${maxLength}, Format: ${format}`);

            // Resolve the note from the in-memory cache.
            const note = becca.notes[noteId];
            if (!note) {
                log.info(`Note with ID ${noteId} not found - returning error`);
                return `Error: Note with ID ${noteId} not found`;
            }
            log.info(`Found note: "${note.title}" (Type: ${note.type})`);

            const content = await note.getContent();
            if (!content || typeof content !== 'string' || content.trim().length === 0) {
                return {
                    success: false,
                    message: 'Note content is empty or invalid'
                };
            }
            log.info(`Retrieved note content, length: ${content.length} chars`);

            // Short content with no focus request needs no LLM round-trip.
            if (content.length <= maxLength && !focus) {
                log.info(`Note content is already shorter than maxLength, returning as is`);
                return {
                    success: true,
                    noteId: note.noteId,
                    title: note.title,
                    summary: this.cleanHtml(content),
                    wasAlreadyShort: true
                };
            }

            // Remove HTML tags before sending the text to the model.
            const cleanContent = this.cleanHtml(content);

            const aiService = await aiServiceManager.getService();
            log.info(`Using ${aiService.getName()} to generate summary`);

            // Build the instruction from the requested format and optional focus.
            let prompt = `Summarize the following text`;
            if (focus) {
                prompt += ` with a focus on ${focus}`;
            }
            if (format === 'bullets') {
                prompt += ` in a bullet point format`;
            } else if (format === 'executive') {
                prompt += ` as a brief executive summary`;
            } else {
                prompt += ` in a concise paragraph`;
            }
            prompt += `. Keep the summary under ${maxLength} characters:\n\n${cleanContent}`;

            // Generate the summary.
            const summaryStartTime = Date.now();
            const completion = await aiService.generateChatCompletion([
                { role: 'system', content: 'You are a skilled summarizer. Create concise, accurate summaries while preserving the key information.' },
                { role: 'user', content: prompt }
            ], {
                temperature: SEARCH_CONSTANTS.TEMPERATURE.QUERY_PROCESSOR, // Lower temperature for more focused summaries
                maxTokens: SEARCH_CONSTANTS.LIMITS.DEFAULT_MAX_TOKENS // Enough tokens for the summary
            });
            const summaryDuration = Date.now() - summaryStartTime;

            log.info(`Generated summary in ${summaryDuration}ms, length: ${completion.text.length} chars`);

            return {
                success: true,
                noteId: note.noteId,
                title: note.title,
                originalLength: content.length,
                summary: completion.text,
                format: format,
                focus: focus || 'general content'
            };
        } catch (error: unknown) {
            // `unknown` + narrowing instead of `any` for type-safe error handling.
            const message = error instanceof Error ? error.message : String(error);
            log.error(`Error executing summarize_note tool: ${message}`);
            return `Error: ${message}`;
        }
    }

    /**
     * Strip tags, decode common entities, and collapse whitespace so the text
     * sent to (or returned from) the summarizer is plain prose.
     * NOTE: &amp; is decoded last so "&amp;lt;" decodes to "&lt;", not "<".
     */
    private cleanHtml(html: string): string {
        if (typeof html !== 'string') {
            return '';
        }

        let text = html.replace(/<[^>]*>/g, '');

        text = text
            .replace(/&lt;/g, '<')
            .replace(/&gt;/g, '>')
            .replace(/&quot;/g, '"')
            .replace(/&#39;/g, "'")
            .replace(/&nbsp;/g, ' ')
            .replace(/&amp;/g, '&');

        text = text.replace(/\s+/g, ' ').trim();

        return text;
    }
}

View File

@ -1,140 +0,0 @@
/**
* Note Update Tool
*
* This tool allows the LLM to update existing notes in Trilium.
*/
import type { Tool, ToolHandler } from './tool_interfaces.js';
import log from '../../log.js';
import becca from '../../../becca/becca.js';
import notes from '../../notes.js';
/**
 * Schema for the `update_note` tool as advertised to the LLM.
 *
 * Updates a note's title and/or content by its system ID; `mode` controls
 * whether new content replaces, appends to, or prepends to the existing body.
 */
export const noteUpdateToolDefinition: Tool = {
    type: 'function',
    function: {
        name: 'update_note',
        description: 'Update the content or title of an existing note',
        parameters: {
            type: 'object',
            properties: {
                noteId: {
                    type: 'string',
                    description: 'System ID of the note to update (not the title). This is a unique identifier like "abc123def456" that must be used to identify the specific note.'
                },
                title: {
                    type: 'string',
                    description: 'New title for the note (if you want to change it)'
                },
                content: {
                    type: 'string',
                    description: 'New content for the note (if you want to change it)'
                },
                mode: {
                    type: 'string',
                    description: 'How to update content: replace (default), append, or prepend',
                    enum: ['replace', 'append', 'prepend']
                }
            },
            required: ['noteId']
        }
    }
};
/**
 * Note update tool implementation
 *
 * Updates the title and/or content of an existing note identified by its
 * system noteId. Content may replace, append to, or prepend to the
 * existing content. Title and content updates are attempted independently;
 * a failure in one is reported in the result without aborting the other.
 */
export class NoteUpdateTool implements ToolHandler {
    public definition: Tool = noteUpdateToolDefinition;

    /**
     * Execute the note update tool.
     *
     * @param args.noteId - System ID of the note to update
     * @param args.title - Optional new title
     * @param args.content - Optional new content
     * @param args.mode - How content is applied: 'replace' (default), 'append' or 'prepend'
     * @returns A result object on success, or an error string
     */
    public async execute(args: { noteId: string, title?: string, content?: string, mode?: 'replace' | 'append' | 'prepend' }): Promise<string | object> {
        try {
            const { noteId, title, content, mode = 'replace' } = args;

            if (!title && !content) {
                return 'Error: At least one of title or content must be provided to update a note.';
            }

            log.info(`Executing update_note tool - NoteID: "${noteId}", Mode: ${mode}`);

            // Get the note from becca
            const note = becca.notes[noteId];

            if (!note) {
                log.info(`Note with ID ${noteId} not found - returning error`);
                return `Error: Note with ID ${noteId} not found`;
            }

            log.info(`Found note: "${note.title}" (Type: ${note.type})`);

            let titleUpdateResult;
            let contentUpdateResult;

            // Update title if provided
            if (title && title !== note.title) {
                const titleStartTime = Date.now();
                // Remember the old title before overwriting it, so the result
                // message reports the actual transition. (Previously this read
                // note.title after assignment and showed the new title twice.)
                const previousTitle = note.title;

                try {
                    // Update the note title by setting it and saving
                    note.title = title;
                    note.save();

                    const titleDuration = Date.now() - titleStartTime;
                    log.info(`Updated note title to "${title}" in ${titleDuration}ms`);
                    titleUpdateResult = `Title updated from "${previousTitle}" to "${title}"`;
                } catch (error: any) {
                    log.error(`Error updating note title: ${error.message || String(error)}`);
                    titleUpdateResult = `Failed to update title: ${error.message || 'Unknown error'}`;
                }
            }

            // Update content if provided
            if (content) {
                const contentStartTime = Date.now();

                try {
                    let newContent = content;

                    // For append or prepend modes, get the current content first
                    // (assumes getContent resolves to a string here — TODO confirm
                    // behavior for binary/Buffer-backed notes)
                    if (mode === 'append' || mode === 'prepend') {
                        const currentContent = await note.getContent();

                        if (mode === 'append') {
                            newContent = currentContent + '\n\n' + content;
                            log.info(`Appending content to existing note content`);
                        } else if (mode === 'prepend') {
                            newContent = content + '\n\n' + currentContent;
                            log.info(`Prepending content to existing note content`);
                        }
                    }

                    await note.setContent(newContent);

                    const contentDuration = Date.now() - contentStartTime;
                    log.info(`Updated note content in ${contentDuration}ms, new content length: ${newContent.length}`);
                    contentUpdateResult = `Content updated successfully (${mode} mode)`;
                } catch (error: any) {
                    log.error(`Error updating note content: ${error.message || String(error)}`);
                    contentUpdateResult = `Failed to update content: ${error.message || 'Unknown error'}`;
                }
            }

            // Return the results
            return {
                success: true,
                noteId: note.noteId,
                title: note.title,
                titleUpdate: titleUpdateResult || 'No title update requested',
                contentUpdate: contentUpdateResult || 'No content update requested',
                message: `Note "${note.title}" updated successfully`
            };
        } catch (error: any) {
            log.error(`Error executing update_note tool: ${error.message || String(error)}`);
            return `Error: ${error.message || String(error)}`;
        }
    }
}

View File

@ -1,121 +0,0 @@
/**
* Read Note Tool
*
* This tool allows the LLM to read the content of a specific note.
*/
import type { Tool, ToolHandler } from './tool_interfaces.js';
import log from '../../log.js';
import becca from '../../../becca/becca.js';
// Define type for note response
// Shape of the object returned to the LLM by the read_note tool.
interface NoteResponse {
    noteId: string;             // System ID of the note
    title: string;              // Note title
    type: string;               // Note type (e.g. text, code)
    content: string | Buffer;   // Raw note content as returned by getContent()
    attributes?: Array<{        // Present only when includeAttributes is requested
        name: string;
        value: string;
        type: string;
    }>;
}
// Error type guard
// Recognizes genuine Error instances as well as any non-null object
// that carries a `message` property (duck-typed error objects).
function isError(error: unknown): error is Error {
    if (error instanceof Error) {
        return true;
    }
    return typeof error === 'object' && error !== null && 'message' in error;
}
/**
 * Definition of the read note tool
 *
 * JSON-schema style declaration handed to the LLM so it can invoke
 * read_note with validated arguments. noteId must be a system ID, not
 * a note title.
 */
export const readNoteToolDefinition: Tool = {
    type: 'function',
    function: {
        name: 'read_note',
        description: 'Read the content of a specific note by its ID',
        parameters: {
            type: 'object',
            properties: {
                noteId: {
                    type: 'string',
                    description: 'The system ID of the note to read (not the title). This is a unique identifier like "abc123def456" that must be used to access a specific note.'
                },
                includeAttributes: {
                    type: 'boolean',
                    description: 'Whether to include note attributes in the response (default: false)'
                }
            },
            required: ['noteId']
        }
    }
};
/**
 * Read note tool implementation
 *
 * Fetches a note by its system ID and returns its metadata and content,
 * optionally including the note's owned attributes.
 */
export class ReadNoteTool implements ToolHandler {
    public definition: Tool = readNoteToolDefinition;

    /**
     * Execute the read note tool.
     *
     * @param args.noteId - System ID of the note to read
     * @param args.includeAttributes - Include owned attributes in the response (default false)
     * @returns A NoteResponse object, or an error string if the note is missing
     */
    public async execute(args: { noteId: string, includeAttributes?: boolean }): Promise<string | object> {
        try {
            const { noteId, includeAttributes = false } = args;

            log.info(`Executing read_note tool - NoteID: "${noteId}", IncludeAttributes: ${includeAttributes}`);

            const note = becca.notes[noteId];
            if (!note) {
                log.info(`Note with ID ${noteId} not found - returning error`);
                return `Error: Note with ID ${noteId} not found`;
            }

            log.info(`Found note: "${note.title}" (Type: ${note.type})`);

            // Time the content fetch for diagnostics.
            const startTime = Date.now();
            const content = await note.getContent();
            const duration = Date.now() - startTime;
            log.info(`Retrieved note content in ${duration}ms, content length: ${content?.length || 0} chars`);

            const response: NoteResponse = {
                noteId: note.noteId,
                title: note.title,
                type: note.type,
                content: content || ''
            };

            if (includeAttributes) {
                const attributes = note.getOwnedAttributes();
                log.info(`Including ${attributes.length} attributes in response`);

                response.attributes = attributes.map(attr => ({
                    name: attr.name,
                    value: attr.value,
                    type: attr.type
                }));

                // Log up to three example attributes for debugging
                // (no-op when the note has none).
                attributes.slice(0, 3).forEach((attr, index) => {
                    log.info(`Attribute ${index + 1}: ${attr.name}=${attr.value} (${attr.type})`);
                });
            }

            return response;
        } catch (error: unknown) {
            const errorMessage = isError(error) ? error.message : String(error);
            log.error(`Error executing read_note tool: ${errorMessage}`);
            return `Error: ${errorMessage}`;
        }
    }
}

View File

@ -1,493 +0,0 @@
/**
* Relationship Tool
*
* This tool allows the LLM to create, identify, or modify relationships between notes.
*/
import type { Tool, ToolHandler } from './tool_interfaces.js';
import log from '../../log.js';
import becca from '../../../becca/becca.js';
import attributes from '../../attributes.js';
import aiServiceManager from '../ai_service_manager.js';
import { SEARCH_CONSTANTS } from '../constants/search_constants.js';
import searchService from '../../search/services/search.js';
// Define types locally for relationship tool
// An incoming relation: a note elsewhere that points at the source note.
interface Backlink {
    noteId: string;        // ID of the linking note
    title: string;         // Title of the linking note
    relationName: string;  // Name of the relation attribute
    sourceNoteId: string;  // ID of the note that owns the relation
    sourceTitle: string;   // Title of the note that owns the relation
}
// An outgoing relation or similarity match from the source note.
interface RelatedNote {
    noteId: string;        // ID of the related note
    title: string;         // Title of the related note
    similarity: number;    // Similarity score (1.0 for direct relations)
    relationName: string;  // Name of the relation attribute
    targetNoteId: string;  // ID of the relation target
    targetTitle: string;   // Title of the relation target
}
// An AI-proposed relation between the source note and a related note.
interface Suggestion {
    targetNoteId: string;      // Proposed relation target
    targetTitle: string;       // Target note title
    similarity: number;        // Similarity score that motivated the suggestion
    suggestedRelation: string; // Relation name proposed by the LLM
}
/**
 * Definition of the relationship tool
 *
 * JSON-schema style declaration handed to the LLM. The schema requires
 * action and sourceNoteId; targetNoteId and relationName are enforced by
 * the implementation for the 'create' action.
 */
export const relationshipToolDefinition: Tool = {
    type: 'function',
    function: {
        name: 'manage_relationships',
        description: 'Create, list, or modify relationships between notes',
        parameters: {
            type: 'object',
            properties: {
                action: {
                    type: 'string',
                    description: 'Action to perform on relationships',
                    enum: ['create', 'list', 'find_related', 'suggest']
                },
                sourceNoteId: {
                    type: 'string',
                    description: 'System ID of the source note for the relationship (not the title). This is a unique identifier like "abc123def456".'
                },
                targetNoteId: {
                    type: 'string',
                    description: 'System ID of the target note for the relationship (not the title). This is a unique identifier like "abc123def456".'
                },
                relationName: {
                    type: 'string',
                    description: 'Name of the relation (for create action, e.g., "references", "belongs to", "depends on")'
                },
                limit: {
                    type: 'number',
                    description: 'Maximum number of relationships to return (for list action)'
                }
            },
            required: ['action', 'sourceNoteId']
        }
    }
};
/**
 * Relationship tool implementation
 *
 * Supports four actions on a source note:
 * - create: add a named relation to a target note
 * - list: enumerate outgoing and incoming relations
 * - find_related: locate related notes via search plus direct relations
 * - suggest: ask the AI service to propose relation names for related notes
 */
export class RelationshipTool implements ToolHandler {
    public definition: Tool = relationshipToolDefinition;

    /**
     * Execute the relationship tool.
     *
     * @param args.action - One of 'create', 'list', 'find_related', 'suggest'
     * @param args.sourceNoteId - System ID of the source note
     * @param args.targetNoteId - Target note ID (required for 'create')
     * @param args.relationName - Relation name (required for 'create')
     * @param args.limit - Maximum number of results (default 10)
     * @returns A result object, or an error string
     */
    public async execute(args: {
        action: 'create' | 'list' | 'find_related' | 'suggest',
        sourceNoteId: string,
        targetNoteId?: string,
        relationName?: string,
        limit?: number
    }): Promise<string | object> {
        try {
            const { action, sourceNoteId, targetNoteId, relationName, limit = 10 } = args;

            log.info(`Executing manage_relationships tool - Action: ${action}, SourceNoteId: ${sourceNoteId}`);

            // Get the source note from becca
            const sourceNote = becca.notes[sourceNoteId];

            if (!sourceNote) {
                log.info(`Source note with ID ${sourceNoteId} not found - returning error`);
                return `Error: Source note with ID ${sourceNoteId} not found`;
            }

            log.info(`Found source note: "${sourceNote.title}" (Type: ${sourceNote.type})`);

            // Dispatch to the requested action
            if (action === 'create') {
                return await this.createRelationship(sourceNote, targetNoteId, relationName);
            } else if (action === 'list') {
                return await this.listRelationships(sourceNote, limit);
            } else if (action === 'find_related') {
                return await this.findRelatedNotes(sourceNote, limit);
            } else if (action === 'suggest') {
                return await this.suggestRelationships(sourceNote, limit);
            } else {
                return `Error: Unsupported action "${action}". Supported actions are: create, list, find_related, suggest`;
            }
        } catch (error: any) {
            log.error(`Error executing manage_relationships tool: ${error.message || String(error)}`);
            return `Error: ${error.message || String(error)}`;
        }
    }

    /**
     * Create a relationship between notes.
     *
     * @param sourceNote - Source note (becca note object)
     * @param targetNoteId - System ID of the target note
     * @param relationName - Name of the relation to create
     * @returns Result object; success is false for validation failures
     */
    private async createRelationship(sourceNote: any, targetNoteId?: string, relationName?: string): Promise<object> {
        if (!targetNoteId) {
            return {
                success: false,
                message: 'Target note ID is required for create action'
            };
        }

        if (!relationName) {
            return {
                success: false,
                message: 'Relation name is required for create action'
            };
        }

        // Get the target note from becca
        const targetNote = becca.notes[targetNoteId];

        if (!targetNote) {
            log.info(`Target note with ID ${targetNoteId} not found - returning error`);
            return {
                success: false,
                message: `Target note with ID ${targetNoteId} not found`
            };
        }

        log.info(`Found target note: "${targetNote.title}" (Type: ${targetNote.type})`);

        try {
            // Check if relationship already exists; creating duplicates is refused
            const existingRelations = sourceNote.getRelationTargets(relationName);

            for (const existingNote of existingRelations) {
                if (existingNote.noteId === targetNoteId) {
                    log.info(`Relationship ${relationName} already exists from "${sourceNote.title}" to "${targetNote.title}"`);
                    return {
                        success: false,
                        sourceNoteId: sourceNote.noteId,
                        sourceTitle: sourceNote.title,
                        targetNoteId: targetNote.noteId,
                        targetTitle: targetNote.title,
                        relationName: relationName,
                        message: `Relationship ${relationName} already exists from "${sourceNote.title}" to "${targetNote.title}"`
                    };
                }
            }

            // Create the relationship attribute
            const startTime = Date.now();
            await attributes.createRelation(sourceNote.noteId, relationName, targetNote.noteId);
            const duration = Date.now() - startTime;

            log.info(`Created relationship ${relationName} from "${sourceNote.title}" to "${targetNote.title}" in ${duration}ms`);

            return {
                success: true,
                sourceNoteId: sourceNote.noteId,
                sourceTitle: sourceNote.title,
                targetNoteId: targetNote.noteId,
                targetTitle: targetNote.title,
                relationName: relationName,
                message: `Created relationship ${relationName} from "${sourceNote.title}" to "${targetNote.title}"`
            };
        } catch (error: any) {
            log.error(`Error creating relationship: ${error.message || String(error)}`);
            throw error;
        }
    }

    /**
     * List relationships for a note: outgoing relations (this note is the
     * source) and incoming relations (this note is the target).
     *
     * @param sourceNote - The note whose relations are listed
     * @param limit - Maximum number of relations per direction
     */
    private async listRelationships(sourceNote: any, limit: number): Promise<object> {
        try {
            // Get outgoing relationships (where this note is the source)
            const outgoingAttributes = sourceNote.getAttributes()
                .filter((attr: any) => attr.type === 'relation')
                .slice(0, limit);

            const outgoingRelations: RelatedNote[] = [];

            for (const attr of outgoingAttributes) {
                const targetNote = becca.notes[attr.value];

                if (targetNote) {
                    outgoingRelations.push({
                        noteId: targetNote.noteId,
                        title: targetNote.title,
                        similarity: 1.0,
                        relationName: attr.name,
                        targetNoteId: targetNote.noteId,
                        targetTitle: targetNote.title
                    });
                }
            }

            // Get incoming relationships (where this note is the target).
            // Since becca.findNotesWithRelation doesn't exist, use attributes to find notes with relation
            const incomingRelations: Backlink[] = [];

            // Find all attributes of type relation that point to this note
            const relationAttributes = sourceNote.getTargetRelations();

            for (const attr of relationAttributes) {
                if (attr.type === 'relation') {
                    const sourceOfRelation = attr.getNote();

                    if (sourceOfRelation && !sourceOfRelation.isDeleted) {
                        incomingRelations.push({
                            noteId: sourceOfRelation.noteId,
                            title: sourceOfRelation.title,
                            relationName: attr.name,
                            sourceNoteId: sourceOfRelation.noteId,
                            sourceTitle: sourceOfRelation.title
                        });

                        if (incomingRelations.length >= limit) {
                            break;
                        }
                    }
                }
            }

            log.info(`Found ${outgoingRelations.length} outgoing and ${incomingRelations.length} incoming relationships`);

            return {
                success: true,
                noteId: sourceNote.noteId,
                title: sourceNote.title,
                outgoingRelations: outgoingRelations,
                incomingRelations: incomingRelations.slice(0, limit),
                message: `Found ${outgoingRelations.length} outgoing and ${incomingRelations.length} incoming relationships for "${sourceNote.title}"`
            };
        } catch (error: any) {
            log.error(`Error listing relationships: ${error.message || String(error)}`);
            throw error;
        }
    }

    /**
     * Find related notes using TriliumNext's search service, combining
     * title/content keyword search with directly related notes.
     *
     * @param sourceNote - The note to find relations for
     * @param limit - Maximum number of related notes to return
     */
    private async findRelatedNotes(sourceNote: any, limit: number): Promise<object> {
        try {
            log.info(`Using TriliumNext search to find notes related to "${sourceNote.title}"`);

            // Get note content for search.
            // FIX: awaited for consistency with every other getContent() call
            // site in this module; an unawaited promise would fail the
            // `typeof content === 'string'` check and silently skip
            // content-based search.
            const content = await sourceNote.getContent();
            const title = sourceNote.title;

            // Create search queries from the note title and content
            const searchQueries = [title];

            // Extract key terms from content if available
            if (content && typeof content === 'string') {
                // Extract meaningful words from content (filter out common words)
                const contentWords = content
                    .toLowerCase()
                    .split(/\s+/)
                    .filter(word => word.length > 3)
                    .filter(word => !/^(the|and|but|for|are|from|they|been|have|this|that|with|will|when|where|what|how)$/.test(word))
                    .slice(0, 10); // Take first 10 meaningful words

                if (contentWords.length > 0) {
                    searchQueries.push(contentWords.join(' '));
                }
            }

            // Execute searches and combine results, deduplicating by noteId
            const searchStartTime = Date.now();
            const allResults = new Map<string, any>();
            let searchDuration = 0;

            for (const query of searchQueries) {
                try {
                    const results = searchService.searchNotes(query, {
                        includeArchivedNotes: false,
                        fastSearch: false // Use full search for better results
                    });

                    // Add results to our map (avoiding duplicates)
                    for (const note of results.slice(0, limit * 2)) { // Get more to account for duplicates
                        if (note.noteId !== sourceNote.noteId && !note.isDeleted) {
                            allResults.set(note.noteId, {
                                noteId: note.noteId,
                                title: note.title,
                                similarity: 0.8 // Base similarity for search results
                            });
                        }
                    }
                } catch (error) {
                    log.error(`Search query failed: ${query} - ${error}`);
                }
            }

            searchDuration = Date.now() - searchStartTime;

            // Also add notes that are directly related via attributes;
            // these rank above keyword matches
            const directlyRelatedNotes = this.getDirectlyRelatedNotes(sourceNote);
            for (const note of directlyRelatedNotes) {
                if (!allResults.has(note.noteId)) {
                    allResults.set(note.noteId, {
                        noteId: note.noteId,
                        title: note.title,
                        similarity: 1.0 // Higher similarity for directly related notes
                    });
                }
            }

            const relatedNotes = Array.from(allResults.values())
                .sort((a, b) => b.similarity - a.similarity) // Sort by similarity
                .slice(0, limit);

            log.info(`Found ${relatedNotes.length} related notes in ${searchDuration}ms`);

            return {
                success: true,
                noteId: sourceNote.noteId,
                title: sourceNote.title,
                relatedNotes: relatedNotes,
                message: `Found ${relatedNotes.length} notes related to "${sourceNote.title}" using search and relationship analysis`
            };
        } catch (error: any) {
            log.error(`Error finding related notes: ${error.message || String(error)}`);
            throw error;
        }
    }

    /**
     * Get notes that are directly related through attributes/relations,
     * plus parents and children. Errors are logged and yield a partial list.
     */
    private getDirectlyRelatedNotes(sourceNote: any): any[] {
        const relatedNotes: any[] = [];

        try {
            // Get outgoing relations
            const outgoingAttributes = sourceNote.getAttributes().filter((attr: any) => attr.type === 'relation');
            for (const attr of outgoingAttributes) {
                const targetNote = becca.notes[attr.value];
                if (targetNote && !targetNote.isDeleted) {
                    relatedNotes.push(targetNote);
                }
            }

            // Get incoming relations
            const incomingRelations = sourceNote.getTargetRelations();
            for (const attr of incomingRelations) {
                if (attr.type === 'relation') {
                    const sourceOfRelation = attr.getNote();
                    if (sourceOfRelation && !sourceOfRelation.isDeleted) {
                        relatedNotes.push(sourceOfRelation);
                    }
                }
            }

            // Get parent and child notes
            const parentNotes = sourceNote.getParentNotes();
            for (const parent of parentNotes) {
                if (!parent.isDeleted) {
                    relatedNotes.push(parent);
                }
            }

            const childNotes = sourceNote.getChildNotes();
            for (const child of childNotes) {
                if (!child.isDeleted) {
                    relatedNotes.push(child);
                }
            }
        } catch (error) {
            log.error(`Error getting directly related notes: ${error}`);
        }

        return relatedNotes;
    }

    /**
     * Suggest possible relationships based on content analysis: find related
     * notes, then ask the AI service for a relation name per candidate.
     * Per-candidate failures are logged and skipped.
     */
    private async suggestRelationships(sourceNote: any, limit: number): Promise<object> {
        try {
            // First, find related notes using vector search
            const relatedResult = await this.findRelatedNotes(sourceNote, limit) as any;

            if (!relatedResult.success || !relatedResult.relatedNotes || relatedResult.relatedNotes.length === 0) {
                return {
                    success: false,
                    message: 'Could not find any related notes to suggest relationships'
                };
            }

            // Get the AI service for relationship suggestion
            const aiService = await aiServiceManager.getService();

            log.info(`Using ${aiService.getName()} to suggest relationships for ${relatedResult.relatedNotes.length} related notes`);

            // Get the source note content
            const sourceContent = await sourceNote.getContent();

            // Prepare suggestions
            const suggestions: Suggestion[] = [];

            for (const relatedNote of relatedResult.relatedNotes) {
                try {
                    // Get the target note content
                    const targetNote = becca.notes[relatedNote.noteId];
                    const targetContent = await targetNote.getContent();

                    // Prepare a prompt for the AI service
                    const prompt = `Analyze the relationship between these two notes and suggest a descriptive relation name (like "references", "implements", "depends on", etc.)
SOURCE NOTE: "${sourceNote.title}"
${typeof sourceContent === 'string' ? sourceContent.substring(0, 300) : ''}
TARGET NOTE: "${targetNote.title}"
${typeof targetContent === 'string' ? targetContent.substring(0, 300) : ''}
Suggest the most appropriate relationship type that would connect the source note to the target note. Reply with ONLY the relationship name, nothing else.`;

                    // Get the suggestion
                    const completion = await aiService.generateChatCompletion([
                        {
                            role: 'system',
                            content: 'You analyze the relationship between notes and suggest a concise, descriptive relation name.'
                        },
                        { role: 'user', content: prompt }
                    ], {
                        temperature: SEARCH_CONSTANTS.TEMPERATURE.RELATIONSHIP_TOOL,
                        maxTokens: SEARCH_CONSTANTS.LIMITS.RELATIONSHIP_TOOL_MAX_TOKENS
                    });

                    // Extract just the relation name (remove any formatting or explanation)
                    const relationName = completion.text
                        .replace(/^["']|["']$/g, '') // Remove quotes
                        .replace(/^relationship:|\./gi, '') // Remove prefixes/suffixes
                        .trim();

                    suggestions.push({
                        targetNoteId: relatedNote.noteId,
                        targetTitle: relatedNote.title,
                        similarity: relatedNote.similarity,
                        suggestedRelation: relationName
                    });

                    log.info(`Suggested relationship "${relationName}" from "${sourceNote.title}" to "${targetNote.title}"`);
                } catch (error: any) {
                    log.error(`Error generating suggestion: ${error.message || String(error)}`);
                    // Continue with other suggestions
                }
            }

            return {
                success: true,
                noteId: sourceNote.noteId,
                title: sourceNote.title,
                suggestions: suggestions,
                message: `Generated ${suggestions.length} relationship suggestions for "${sourceNote.title}"`
            };
        } catch (error: any) {
            log.error(`Error suggesting relationships: ${error.message || String(error)}`);
            throw error;
        }
    }
}

View File

@ -1,284 +0,0 @@
/**
* Search Notes Tool
*
* This tool allows the LLM to search for notes using semantic search.
*/
import type { Tool, ToolHandler } from './tool_interfaces.js';
import log from '../../log.js';
import aiServiceManager from '../ai_service_manager.js';
import becca from '../../../becca/becca.js';
import { ContextExtractor } from '../context/index.js';
/**
 * Definition of the search notes tool
 *
 * JSON-schema style declaration handed to the LLM. Only query is
 * required; parentNoteId restricts the search to a subtree, and
 * summarize switches content previews from truncation to LLM summaries.
 */
export const searchNotesToolDefinition: Tool = {
    type: 'function',
    function: {
        name: 'search_notes',
        description: 'Search for notes in the database using semantic search. Returns notes most semantically related to the query. Use specific, descriptive queries for best results.',
        parameters: {
            type: 'object',
            properties: {
                query: {
                    type: 'string',
                    description: 'The search query to find semantically related notes. Be specific and descriptive for best results.'
                },
                parentNoteId: {
                    type: 'string',
                    description: 'Optional system ID of the parent note to restrict search to a specific branch (not the title). This is a unique identifier like "abc123def456". Do not use note titles here.'
                },
                maxResults: {
                    type: 'number',
                    description: 'Maximum number of results to return (default: 5)'
                },
                summarize: {
                    type: 'boolean',
                    description: 'Whether to provide summarized content previews instead of truncated ones (default: false)'
                }
            },
            required: ['query']
        }
    }
};
/**
 * Get or create the vector search tool dependency
 *
 * Reuses the instance registered with the AI service manager when
 * available; otherwise forces agent-tool initialization and retries.
 *
 * @returns The vector search tool or null if it couldn't be created
 */
async function getOrCreateVectorSearchTool(): Promise<any> {
    try {
        // Fast path: an instance is already registered.
        const existingTool = aiServiceManager.getVectorSearchTool();
        if (existingTool) {
            log.info(`Found existing vectorSearchTool`);
            return existingTool;
        }

        // No existing tool, try to initialize it
        log.info(`VectorSearchTool not found, attempting initialization`);

        const agentTools = aiServiceManager.getAgentTools();
        if (!agentTools || typeof agentTools.initialize !== 'function') {
            log.error('Agent tools manager not available');
            return null;
        }

        try {
            // Force initialization to ensure it runs even if previously marked as initialized
            await agentTools.initialize(true);
        } catch (initError: any) {
            log.error(`Failed to initialize agent tools: ${initError.message}`);
            return null;
        }

        // Try getting the vector search tool again after initialization
        const createdTool = aiServiceManager.getVectorSearchTool();
        if (!createdTool) {
            log.error('Failed to create vectorSearchTool after initialization');
            return null;
        }

        log.info('Successfully created vectorSearchTool');
        return createdTool;
    } catch (error: any) {
        log.error(`Error getting or creating vectorSearchTool: ${error.message}`);
        return null;
    }
}
/**
 * Search notes tool implementation
 *
 * Performs semantic search over notes via the vector search tool and
 * returns results with rich content previews (optionally LLM-summarized).
 */
export class SearchNotesTool implements ToolHandler {
    public definition: Tool = searchNotesToolDefinition;
    // Used to extract formatted note content for previews.
    private contextExtractor: ContextExtractor;

    constructor() {
        this.contextExtractor = new ContextExtractor();
    }

    /**
     * Get rich content preview for a note
     * This provides a better preview than the simple truncation in VectorSearchTool
     *
     * @param noteId - System ID of the note to preview
     * @param summarize - When true, attempt an LLM summary before falling
     *                    back to smart truncation
     * @returns Preview text, or an explanatory placeholder on failure
     */
    private async getRichContentPreview(noteId: string, summarize: boolean): Promise<string> {
        try {
            const note = becca.getNote(noteId);
            if (!note) {
                return 'Note not found';
            }

            // Get the full content with proper formatting
            const formattedContent = await this.contextExtractor.getNoteContent(noteId);
            if (!formattedContent) {
                return 'No content available';
            }

            // If summarization is requested
            if (summarize) {
                // Try to get an LLM service for summarization
                try {
                    const llmService = await aiServiceManager.getService();

                    const messages = [
                        {
                            role: "system" as const,
                            content: "Summarize the following note content concisely while preserving key information. Keep your summary to about 3-4 sentences."
                        },
                        {
                            role: "user" as const,
                            content: `Note title: ${note.title}\n\nContent:\n${formattedContent}`
                        }
                    ];

                    // Request summarization with safeguards to prevent recursion
                    const result = await llmService.generateChatCompletion(messages, {
                        temperature: 0.3,
                        maxTokens: 200,
                        // Type assertion to bypass type checking for special internal parameters
                        ...(({
                            bypassFormatter: true,
                            bypassContextProcessing: true
                        } as Record<string, boolean>))
                    });

                    if (result && result.text) {
                        return result.text;
                    }
                } catch (error) {
                    log.error(`Error summarizing content: ${error}`);
                    // Fall through to smart truncation if summarization fails
                }
            }

            try {
                // Fall back to smart truncation if summarization fails or isn't requested
                const previewLength = Math.min(formattedContent.length, 600);
                let preview = formattedContent.substring(0, previewLength);

                // Only add ellipsis if we've truncated the content
                if (previewLength < formattedContent.length) {
                    // Try to find a natural break point
                    const breakPoints = ['. ', '.\n', '\n\n', '\n', '. '];

                    for (const breakPoint of breakPoints) {
                        const lastBreak = preview.lastIndexOf(breakPoint);
                        if (lastBreak > previewLength * 0.6) { // At least 60% of the way through
                            preview = preview.substring(0, lastBreak + breakPoint.length);
                            break;
                        }
                    }

                    // Add ellipsis if truncated
                    preview += '...';
                }

                return preview;
            } catch (error) {
                log.error(`Error getting rich content preview: ${error}`);
                return 'Error retrieving content preview';
            }
        } catch (error) {
            log.error(`Error getting rich content preview: ${error}`);
            return 'Error retrieving content preview';
        }
    }

    /**
     * Execute the search notes tool
     *
     * @param args.query - Search query text
     * @param args.parentNoteId - Optional subtree restriction (system ID)
     * @param args.maxResults - Maximum number of results (default 5)
     * @param args.summarize - Use LLM-summarized previews (default false)
     * @returns Result object with count/results/message, or an error string
     */
    public async execute(args: {
        query: string,
        parentNoteId?: string,
        maxResults?: number,
        summarize?: boolean
    }): Promise<string | object> {
        try {
            const {
                query,
                parentNoteId,
                maxResults = 5,
                summarize = false
            } = args;

            log.info(`Executing search_notes tool - Query: "${query}", ParentNoteId: ${parentNoteId || 'not specified'}, MaxResults: ${maxResults}, Summarize: ${summarize}`);

            // Get the vector search tool from the AI service manager
            const vectorSearchTool = await getOrCreateVectorSearchTool();

            if (!vectorSearchTool) {
                return `Error: Vector search tool is not available. The system may still be initializing or there could be a configuration issue.`;
            }

            log.info(`Retrieved vector search tool from AI service manager`);

            // Check if searchNotes method exists
            if (!vectorSearchTool.searchNotes || typeof vectorSearchTool.searchNotes !== 'function') {
                log.error(`Vector search tool is missing searchNotes method`);
                return `Error: Vector search tool is improperly configured (missing searchNotes method).`;
            }

            // Execute the search
            log.info(`Performing semantic search for: "${query}"`);
            const searchStartTime = Date.now();
            const response = await vectorSearchTool.searchNotes(query, parentNoteId, maxResults);

            const results: Array<Record<string, unknown>> = response?.matches ?? [];
            const searchDuration = Date.now() - searchStartTime;

            log.info(`Search completed in ${searchDuration}ms, found ${results.length} matching notes`);

            if (results.length > 0) {
                // Log top results
                results.slice(0, 3).forEach((result: any, index: number) => {
                    log.info(`Result ${index + 1}: "${result.title}" (similarity: ${Math.round(result.similarity * 100)}%)`);
                });
            } else {
                log.info(`No matching notes found for query: "${query}"`);
            }

            // Get enhanced previews for each result
            const enhancedResults = await Promise.all(
                results.map(async (result: any) => {
                    const noteId = result.noteId;
                    const preview = await this.getRichContentPreview(noteId, summarize);

                    return {
                        noteId: noteId,
                        title: result?.title as string || '[Unknown title]',
                        preview: preview,
                        score: result?.score as number,
                        dateCreated: result?.dateCreated as string,
                        dateModified: result?.dateModified as string,
                        similarity: Math.round(result.similarity * 100) / 100,
                        parentId: result.parentId
                    };
                })
            );

            // Format the results
            if (results.length === 0) {
                return {
                    count: 0,
                    results: [],
                    query: query,
                    message: 'No notes found matching your query. Try using more general terms or try the keyword_search_notes tool with a different query. Note: Use the noteId (not the title) when performing operations on specific notes with other tools.'
                };
            } else {
                return {
                    count: enhancedResults.length,
                    results: enhancedResults,
                    message: "Note: Use the noteId (not the title) when performing operations on specific notes with other tools."
                };
            }
        } catch (error: unknown) {
            const errorMessage = error instanceof Error ? error.message : String(error);
            log.error(`Error executing search_notes tool: ${errorMessage}`);
            return `Error: ${errorMessage}`;
        }
    }
}

View File

@ -1,179 +0,0 @@
/**
* Search Suggestion Tool
*
* This tool provides guidance on how to formulate different types of search queries in Trilium.
* It helps the LLM understand the correct syntax for various search scenarios.
*/
import type { Tool, ToolHandler } from './tool_interfaces.js';
import log from '../../log.js';
// Template types
// A single example query: the template string (with {placeholder} slots)
// and a human-readable description of what it matches.
type QueryTemplate = {
    template: string;
    description: string;
};
// Map of search category -> example templates for that category.
type SearchTypesMap = {
    basic: QueryTemplate[];
    attribute: QueryTemplate[];
    content: QueryTemplate[];
    relation: QueryTemplate[];
    date: QueryTemplate[];
    advanced: QueryTemplate[];
};
// Valid search category names (the keys of SearchTypesMap).
type SearchType = keyof SearchTypesMap;
/**
 * Definition of the search suggestion tool
 *
 * JSON-schema style declaration handed to the LLM. searchType selects
 * a category of query templates; userQuery is optional free text
 * describing what the user wants to find.
 */
export const searchSuggestionToolDefinition: Tool = {
    type: 'function',
    function: {
        name: 'search_suggestion',
        description: 'Get suggestions on how to formulate different types of search queries in Trilium. Use this when you need help constructing the right search syntax.',
        parameters: {
            type: 'object',
            properties: {
                searchType: {
                    type: 'string',
                    description: 'Type of search you want suggestions for',
                    enum: [
                        'basic',
                        'attribute',
                        'content',
                        'relation',
                        'date',
                        'advanced'
                    ]
                },
                userQuery: {
                    type: 'string',
                    description: 'The user\'s original query or description of what they want to search for'
                }
            },
            required: ['searchType']
        }
    }
};
/**
 * Search suggestion tool implementation.
 *
 * Fills per-search-type query templates with terms extracted from the
 * user's query and returns ready-to-use example queries plus descriptions.
 */
export class SearchSuggestionTool implements ToolHandler {
    public definition: Tool = searchSuggestionToolDefinition;

    // Example query templates for each search type
    private queryTemplates: SearchTypesMap = {
        basic: [
            { template: '"{term1}"', description: 'Exact phrase search' },
            { template: '{term1} {term2}', description: 'Find notes containing both terms' },
            { template: '{term1} OR {term2}', description: 'Find notes containing either term' }
        ],
        attribute: [
            { template: '#{attributeName}', description: 'Find notes with a specific label' },
            { template: '#{attributeName} = {value}', description: 'Find notes with label equal to value' },
            { template: '#{attributeName} >= {value}', description: 'Find notes with numeric label greater or equal to value' },
            { template: '#{attributeName} *= {value}', description: 'Find notes with label containing value' },
            { template: '~{relationName}.title *= {value}', description: 'Find notes with relation to note whose title contains value' }
        ],
        content: [
            { template: 'note.content *= "{text}"', description: 'Find notes containing specific text in content' },
            { template: 'note.content =* "{text}"', description: 'Find notes whose content starts with text' },
            { template: 'note.content %= "{regex}"', description: 'Find notes whose content matches regex pattern' }
        ],
        relation: [
            { template: '~{relationName}', description: 'Find notes with a specific relation' },
            { template: '~{relationName}.title *= {text}', description: 'Find notes related to notes with title containing text' },
            { template: '~{relationName}.#tag', description: 'Find notes related to notes with specific label' }
        ],
        date: [
            { template: '#dateNote = MONTH', description: 'Find notes with dateNote attribute equal to current month' },
            { template: '#dateNote >= TODAY-7', description: 'Find notes with dateNote in the last week' },
            { template: '#dateCreated >= YEAR-1', description: 'Find notes created within the last year' }
        ],
        advanced: [
            { template: '#book AND #year >= 2020 AND note.content *= "important"', description: 'Combined attribute and content search' },
            { template: '#project AND (#status=active OR #status=pending)', description: 'Complex attribute condition' },
            { template: 'note.children.title *= {text}', description: 'Find notes whose children contain text in title' }
        ]
    };

    /**
     * Execute the search suggestion tool.
     *
     * @param args.searchType - one of the supported search categories (validated at runtime)
     * @param args.userQuery - optional free text whose words seed the templates
     * @returns an object with filled-in suggestions, an error object for an
     *          invalid search type, or an `Error: ...` string on failure
     */
    public async execute(args: { searchType: string, userQuery?: string }): Promise<string | object> {
        try {
            const { searchType, userQuery = '' } = args;

            log.info(`Executing search_suggestion tool - Type: "${searchType}", UserQuery: "${userQuery}"`);

            // Validate search type before indexing into the template map.
            if (!this.isValidSearchType(searchType)) {
                return {
                    error: `Invalid search type: ${searchType}`,
                    validTypes: Object.keys(this.queryTemplates)
                };
            }

            // The type guard above narrows searchType to SearchType,
            // so no assertion is needed here.
            const templates = this.queryTemplates[searchType];

            // Extract potential terms from the user query: words longer than
            // 2 characters, stripped of punctuation.
            const terms = userQuery
                .split(/\s+/)
                .filter(term => term.length > 2)
                .map(term => term.replace(/[^\w\s]/g, ''));

            // Fill templates with user terms if available.
            const suggestions = templates.map((template: QueryTemplate) => {
                let filledTemplate = template.template;

                // Try to fill in term1..term3. replace() only touches the first
                // occurrence, which is fine: each placeholder appears once.
                if (terms.length > 0) {
                    for (let i = 0; i < Math.min(terms.length, 3); i++) {
                        filledTemplate = filledTemplate.replace(`{term${i+1}}`, terms[i]);
                    }
                }

                // For attribute/relation examples, try to use something meaningful.
                if (searchType === 'attribute' || searchType === 'relation') {
                    // These are common attribute/relation names in note-taking contexts.
                    // NOTE: a random pick makes the output nondeterministic by design.
                    const commonAttributes = ['tag', 'category', 'status', 'priority', 'project', 'area', 'year'];
                    filledTemplate = filledTemplate.replace('{attributeName}', commonAttributes[Math.floor(Math.random() * commonAttributes.length)]);
                    filledTemplate = filledTemplate.replace('{relationName}', 'parent');
                }

                // Fill remaining placeholders with generic examples.
                filledTemplate = filledTemplate
                    .replace('{text}', terms[0] || 'example')
                    .replace('{value}', terms[1] || 'value')
                    .replace('{regex}', '[a-z]+');

                return {
                    query: filledTemplate,
                    description: template.description
                };
            });

            return {
                searchType,
                userQuery,
                suggestions,
                note: "Use these suggestions with keyword_search_notes or attribute_search tools to find relevant notes."
            };
        } catch (error: unknown) {
            // Narrow the unknown error instead of assuming `any`, matching the
            // error-handling style of the other search tools in this package.
            const errorMessage = error instanceof Error ? error.message : String(error);
            log.error(`Error executing search_suggestion tool: ${errorMessage}`);
            return `Error: ${errorMessage}`;
        }
    }

    /**
     * Type guard: check whether a string names a valid search type.
     */
    private isValidSearchType(searchType: string): searchType is SearchType {
        return Object.keys(this.queryTemplates).includes(searchType);
    }
}

View File

@ -2,25 +2,11 @@
* Tool Initializer
*
* This module initializes all available tools for the LLM to use.
* Supports both legacy (v1) and consolidated (v2) tool sets.
* Uses consolidated (v2) tool set for optimal performance.
*/
import toolRegistry from './tool_registry.js';
import { SearchNotesTool } from './search_notes_tool.js';
import { KeywordSearchTool } from './keyword_search_tool.js';
import { AttributeSearchTool } from './attribute_search_tool.js';
import { SearchSuggestionTool } from './search_suggestion_tool.js';
import { ReadNoteTool } from './read_note_tool.js';
import { NoteCreationTool } from './note_creation_tool.js';
import { NoteUpdateTool } from './note_update_tool.js';
import { ContentExtractionTool } from './content_extraction_tool.js';
import { RelationshipTool } from './relationship_tool.js';
import { AttributeManagerTool } from './attribute_manager_tool.js';
import { CalendarIntegrationTool } from './calendar_integration_tool.js';
import { NoteSummarizationTool } from './note_summarization_tool.js';
import { initializeConsolidatedTools } from './tool_initializer_v2.js';
import log from '../../log.js';
import options from '../../options.js';
// Error type guard
function isError(error: unknown): error is Error {
@ -28,80 +14,14 @@ function isError(error: unknown): error is Error {
error !== null && 'message' in error);
}
/**
 * Determine whether the consolidated (v2) tool set should be used.
 *
 * Reads the `llm.useConsolidatedTools` option; an absent, unset, or
 * unreadable option defaults to `true` (consolidated tools).
 */
function shouldUseConsolidatedTools(): boolean {
    try {
        const flag = options.getOption('llm.useConsolidatedTools');

        // An unset option means "use the default": consolidated tools.
        if (flag === undefined || flag === null) {
            return true;
        }

        return flag === 'true' || flag === true;
    } catch (error) {
        // Option missing or unreadable — fall back to the consolidated default.
        log.info('LLM consolidated tools option not found, defaulting to true (consolidated tools)');
        return true;
    }
}
/**
 * Register the full legacy (v1) tool set with the tool registry.
 *
 * Registration failures are logged rather than thrown so that a broken
 * tool cannot take down the chat pipeline.
 */
export async function initializeLegacyTools(): Promise<void> {
    try {
        log.info('Initializing LLM tools (legacy v1)...');

        // Instantiate every legacy tool, grouped by purpose, in registration order.
        const legacyTools = [
            // Search and discovery
            new SearchNotesTool(),          // Semantic search
            new KeywordSearchTool(),        // Keyword-based search
            new AttributeSearchTool(),      // Attribute-specific search
            new SearchSuggestionTool(),     // Search syntax helper
            new ReadNoteTool(),             // Read note content
            // Note creation and manipulation
            new NoteCreationTool(),         // Create new notes
            new NoteUpdateTool(),           // Update existing notes
            new NoteSummarizationTool(),    // Summarize note content
            // Attributes and relationships
            new AttributeManagerTool(),     // Manage note attributes
            new RelationshipTool(),         // Manage note relationships
            // Content analysis
            new ContentExtractionTool(),    // Extract info from note content
            new CalendarIntegrationTool()   // Calendar-related operations
        ];

        for (const tool of legacyTools) {
            toolRegistry.registerTool(tool);
        }

        // Log what ended up registered.
        const registered = toolRegistry.getAllTools();
        const names = registered.map(tool => tool.definition.function.name).join(', ');
        log.info(`Successfully registered ${registered.length} LLM tools (legacy): ${names}`);
    } catch (error: unknown) {
        const errorMessage = isError(error) ? error.message : String(error);
        log.error(`Error initializing LLM tools: ${errorMessage}`);
        // Don't throw, just log the error to prevent breaking the pipeline
    }
}
/**
* Initialize all tools for the LLM
* Routes to either consolidated (v2) or legacy (v1) based on feature flag
* Uses consolidated (v2) tools with 4 tools, ~600 tokens saved vs legacy
*/
export async function initializeTools(): Promise<void> {
try {
const useConsolidated = shouldUseConsolidatedTools();
if (useConsolidated) {
log.info('Using consolidated tools (v2) - 4 tools, ~600 tokens saved');
await initializeConsolidatedTools();
} else {
log.info('Using legacy tools (v1) - 12 tools');
await initializeLegacyTools();
}
log.info('Initializing LLM tools (consolidated v2) - 4 tools, ~600 tokens saved');
await initializeConsolidatedTools();
} catch (error: unknown) {
const errorMessage = isError(error) ? error.message : String(error);
log.error(`Error initializing LLM tools: ${errorMessage}`);
@ -110,7 +30,5 @@ export async function initializeTools(): Promise<void> {
}
export default {
initializeTools,
initializeLegacyTools,
shouldUseConsolidatedTools
initializeTools
};