Compare commits

1 Commit

SHA1       Message                          Date
af440dcbc7 Add tool calls with agentic-gpt  2026-02-03 14:00:37 +00:00
4 changed files with 858 additions and 70 deletions

src/agentic-gpt.ts (new file, 317 additions)

@@ -0,0 +1,317 @@
import { GPT } from './gpt.js';
import type { GPTConfig, GPTRequest, Message } from './gpt.js';
import type { ToolDefinition, ToolCall, MessageChunk } from './gpt-response.js';
/**
* Function signature for tool execution
*/
export type ToolFunction = (args: Record<string, unknown>) => Promise<unknown> | unknown;
/**
* Registration entry for a tool with its definition and execution function
*/
export type ToolRegistration = {
/**
* OpenAI-format tool definition
*/
definition: ToolDefinition;
/**
* Function to execute when the tool is called
*/
fn: ToolFunction;
};
/**
* Options for agentic tool execution
*/
export type AgenticOptions = {
/**
* Maximum number of tool-execution iterations; guards against infinite loops
* @default 10
*/
maxLoops?: number;
/**
* Whether to emit events for tool calls
* @default true
*/
emitEvents?: boolean;
};
/**
* AgenticGPT extends the base GPT class to provide automatic tool execution.
*
* This class:
* - Registers tools with their execution functions
* - Automatically executes tools when the LLM requests them
* - Loops until the LLM provides a final response without tool calls
* - Yields all chunks (content, reasoning, tool calls) during iteration
* - Emits events for monitoring tool execution
*
* @example
* ```typescript
* const agenticGPT = new AgenticGPT(config, [
* {
* definition: {
* type: 'function',
* function: {
* name: 'get_weather',
* description: 'Get current weather for a location',
* parameters: {
* type: 'object',
* properties: {
* location: { type: 'string', description: 'City name' }
* },
* required: ['location']
* }
* }
* },
* fn: async (args) => {
* return { temperature: 22, condition: 'sunny' };
* }
* }
* ]);
*
* // Automatic tool execution with streaming
* for await (const chunk of agenticGPT.sendWithTools({
* messages: [{ role: 'user', content: 'What is the weather in Paris?' }]
* })) {
* console.log(chunk);
* }
* ```
*/
export class AgenticGPT extends GPT {
/**
* Registry mapping tool names to their execution functions
*/
private toolRegistry: Map<string, ToolFunction> = new Map();
/**
* Tool definitions to send with requests
*/
private toolDefinitions: ToolDefinition[] = [];
/**
* Default options for agentic execution
*/
private options: Required<AgenticOptions>;
/**
* Creates a new AgenticGPT instance
*
* @param config - GPT configuration (API key, URL, model)
* @param tools - Array of tool registrations with definitions and execution functions
* @param options - Optional configuration for agentic behavior
*/
constructor(
config: GPTConfig,
tools: ToolRegistration[] = [],
options: AgenticOptions = {}
) {
super(config);
// Set default options
this.options = {
maxLoops: options.maxLoops ?? 10,
emitEvents: options.emitEvents ?? true,
};
// Register tools
for (const tool of tools) {
this.registerTool(tool);
}
}
/**
* Registers a tool with its definition and execution function
*
* @param tool - Tool registration with definition and execution function
*/
registerTool(tool: ToolRegistration): void {
const toolName = tool.definition.function.name;
this.toolRegistry.set(toolName, tool.fn);
this.toolDefinitions.push(tool.definition);
}
/**
* Unregisters a tool by name
*
* @param toolName - Name of the tool to unregister
*/
unregisterTool(toolName: string): void {
this.toolRegistry.delete(toolName);
this.toolDefinitions = this.toolDefinitions.filter(
def => def.function.name !== toolName
);
}
/**
* Sends a request with automatic tool execution.
*
* This method:
* 1. Sends the request to the GPT API with registered tools
* 2. Yields all chunks (content, reasoning, tool calls) as they arrive
* 3. When tool calls are detected, executes them automatically
* 4. Adds tool results to the conversation and continues
* 5. Repeats until the LLM provides a final response or max loops reached
*
* @param request - GPT request with messages and optional tool configuration
* @param options - Optional overrides for agentic options
* @returns Async iterator of message chunks
*
* @example
* ```typescript
* for await (const chunk of agenticGPT.sendWithTools({
* messages: [{ role: 'user', content: 'Tell me a joke and the weather' }]
* })) {
* if (chunk.type === 'content') {
* process.stdout.write(chunk.content);
* } else if (chunk.type === 'tool_call') {
* console.log('Calling tool:', chunk.toolCall.function.name);
* }
* }
* ```
*/
async *sendWithTools(
request: GPTRequest,
options?: AgenticOptions
): AsyncIterableIterator<MessageChunk> {
// Merge options
const effectiveOptions: Required<AgenticOptions> = {
maxLoops: options?.maxLoops ?? this.options.maxLoops,
emitEvents: options?.emitEvents ?? this.options.emitEvents,
};
// Start with the initial messages
let messages: Message[] = [...request.messages];
let loopCount = 0;
let continueLoop = true;
while (continueLoop && loopCount < effectiveOptions.maxLoops) {
loopCount++;
// Build the request with tools
const gptRequest: GPTRequest = {
messages,
tools: request.tools || this.toolDefinitions,
tool_choice: request.tool_choice || 'auto',
};
// Send to GPT and collect tool calls
const response = this.send(gptRequest);
const toolCalls: ToolCall[] = [];
let assistantContent = '';
// Stream and collect chunks
for await (const chunk of response) {
yield chunk;
if (chunk.type === 'tool_call') {
toolCalls.push(chunk.toolCall);
} else if (chunk.type === 'content' && chunk.content) {
assistantContent += chunk.content;
}
}
// If no tool calls, we're done
if (toolCalls.length === 0) {
continueLoop = false;
break;
}
// Add the assistant message with tool calls to the conversation
messages.push({
role: 'assistant',
content: assistantContent || null, // null when the model produced only tool calls
tool_calls: toolCalls,
});
// Execute tools and add results to messages
for (const toolCall of toolCalls) {
try {
const result = await this.executeTool(toolCall);
// Add tool result to messages
messages.push({
role: 'tool',
tool_call_id: toolCall.id,
name: toolCall.function.name,
content: typeof result === 'string' ? result : JSON.stringify(result),
});
// Emit event if enabled
if (effectiveOptions.emitEvents) {
this.emit('toolCalled', {
toolName: toolCall.function.name,
arguments: JSON.parse(toolCall.function.arguments),
result,
});
}
} catch (error) {
// If tool execution fails, add error message
const errorMessage = error instanceof Error ? error.message : String(error);
messages.push({
role: 'tool',
tool_call_id: toolCall.id,
name: toolCall.function.name,
content: JSON.stringify({ error: errorMessage }),
});
console.error(`Error executing tool ${toolCall.function.name}:`, error);
}
}
}
// Check if we hit max loops
if (loopCount >= effectiveOptions.maxLoops) {
console.warn(`AgenticGPT: Reached maximum loop count (${effectiveOptions.maxLoops})`);
}
}
/**
* Executes a tool call with the provided arguments
*
* @param toolCall - The tool call to execute
* @returns The result of the tool execution
* @throws Error if the tool is not found or execution fails
*/
private async executeTool(toolCall: ToolCall): Promise<unknown> {
const toolName = toolCall.function.name;
const fn = this.toolRegistry.get(toolName);
if (!fn) {
throw new Error(`Tool "${toolName}" not found in registry`);
}
let args: Record<string, unknown>;
try {
args = JSON.parse(toolCall.function.arguments);
} catch {
throw new Error(`Invalid JSON arguments for tool "${toolName}": ${toolCall.function.arguments}`);
}
return await fn(args);
}
/**
* Gets all registered tool definitions
*
* @returns Array of tool definitions
*/
getToolDefinitions(): ToolDefinition[] {
return [...this.toolDefinitions];
}
/**
* Checks if a tool is registered
*
* @param toolName - Name of the tool to check
* @returns True if the tool is registered
*/
hasTool(toolName: string): boolean {
return this.toolRegistry.has(toolName);
}
}
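
Taken together, `registerTool`, `unregisterTool`, `hasTool`, and `getToolDefinitions` let callers manage the tool registry at runtime, independent of any API call. A minimal sketch of that lifecycle, assuming the OpenRouter config used in the examples below (`get_time` is an invented tool):

```typescript
import { AgenticGPT } from './agentic-gpt.js';

const agent = new AgenticGPT({
  apiKey: process.env.OPENROUTER_API_KEY || '',
  apiUrl: 'https://openrouter.ai/api/v1/chat/completions',
  model: 'x-ai/grok-4.1-fast',
});

// Register a tool after construction.
agent.registerTool({
  definition: {
    type: 'function',
    function: {
      name: 'get_time',
      description: 'Get the current time as an ISO string',
      parameters: { type: 'object', properties: {} },
    },
  },
  fn: () => new Date().toISOString(),
});

console.log(agent.hasTool('get_time'));         // true
console.log(agent.getToolDefinitions().length); // 1

// Unregistering removes both the function and its definition,
// so later requests no longer advertise the tool.
agent.unregisterTool('get_time');
console.log(agent.hasTool('get_time'));         // false
```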

src/gpt-response.ts

@@ -1,14 +1,82 @@
import type { SSEvent } from './sse-session.js';
- export type MessageChunk = {
- type: 'reasoning' | 'content';
- reasoning_details?: string;
- content: string;
- }
/**
* JSON Schema type for tool parameter definitions
*/
export type JSONSchema = {
type: 'object' | 'string' | 'number' | 'boolean' | 'array' | 'null';
properties?: Record<string, JSONSchema>;
items?: JSONSchema;
required?: string[];
enum?: (string | number)[];
description?: string;
additionalProperties?: boolean;
[key: string]: unknown;
};
/**
* OpenAI-format tool definition using JSON Schema
*/
export type ToolDefinition = {
type: 'function';
function: {
name: string;
description: string;
parameters: JSONSchema;
strict?: boolean;
};
};
/**
* Represents a complete tool call from the LLM
*/
export type ToolCall = {
id: string;
type: 'function';
function: {
name: string;
arguments: string; // JSON string
};
};
/**
* Delta format for streaming tool calls
*/
export type ToolCallDelta = {
index: number;
id?: string;
type?: 'function';
function?: {
name?: string;
arguments?: string;
};
};
/**
* Message chunk types that can be yielded during streaming
*/
export type MessageChunk =
| {
type: 'reasoning';
reasoning_details?: string;
content: string;
}
| {
type: 'content';
content: string;
}
| {
type: 'tool_call';
toolCall: ToolCall;
};
/**
* Final result after consuming all chunks
*/
export type FinalResult = {
reasoning: string;
content: string;
toolCalls: ToolCall[];
}
export type GPTResponse = {
@@ -20,18 +88,20 @@ export type GPTResponse = {
choices: {
index: number;
delta: {
- role: 'user' | 'assistant' | 'system';
- content: string;
- reasoning: string;
- reasoning_details: {
+ role?: 'user' | 'assistant' | 'system';
+ content?: string | null;
+ reasoning?: string;
+ reasoning_details?: {
type: string;
summary: string;
- }
+ };
+ tool_calls?: ToolCallDelta[];
};
+ finish_reason?: 'stop' | 'tool_calls' | 'length' | 'content_filter' | null;
}[];
- finish_reason: 'stop' | 'tool_calls' | 'length' | 'content_filter' | null;
- native_finish_reason: string | null;
- usage: {
+ finish_reason?: 'stop' | 'tool_calls' | 'length' | 'content_filter' | null;
+ native_finish_reason?: string | null;
+ usage?: {
prompt_tokens: number;
completion_tokens: number;
total_tokens: number;
@@ -57,6 +127,12 @@ export class MessageResponse implements PromiseLike<FinalResult> {
private resolveResult!: (value: FinalResult) => void;
private resultPromise: Promise<FinalResult>;
private iterator: AsyncIterable<SSEvent>;
/**
* Accumulates tool calls as they stream in from the API
* Key is the tool call index, value is the partially completed tool call
*/
private toolCallsInProgress: Map<number, Partial<ToolCall>> = new Map();
constructor(iterator: AsyncIterable<SSEvent>) {
this.iterator = iterator;
@@ -73,9 +149,13 @@ export class MessageResponse implements PromiseLike<FinalResult> {
this.iteratorConsumed = true;
for await (const rawChunk of this.iterator) {
- const chunk = this.parseChunk(rawChunk);
- this.chunks.push(chunk);
- yield chunk;
+ const chunks = this.parseChunk(rawChunk);
+ // parseChunk may return multiple chunks (e.g., when tool calls complete)
+ for (const chunk of chunks) {
+ this.chunks.push(chunk);
+ yield chunk;
+ }
}
this.resolveResult(this.buildResult());
@@ -91,8 +171,12 @@ export class MessageResponse implements PromiseLike<FinalResult> {
(async () => {
for await (const rawChunk of this.iterator) {
- const chunk = this.parseChunk(rawChunk);
- this.chunks.push(chunk);
+ const chunks = this.parseChunk(rawChunk);
+ // parseChunk may return multiple chunks
+ for (const chunk of chunks) {
+ this.chunks.push(chunk);
+ }
}
this.resolveResult(this.buildResult());
@@ -114,22 +198,37 @@ export class MessageResponse implements PromiseLike<FinalResult> {
return {
reasoning: this.chunks
.filter(c => c.type === 'reasoning')
- .map(c => c.content)
+ .map(c => 'content' in c ? c.content : '')
.join(''),
content: this.chunks
.filter(c => c.type === 'content')
- .map(c => c.content)
+ .map(c => 'content' in c ? c.content : '')
.join(''),
toolCalls: this.chunks
.filter(c => c.type === 'tool_call')
.map(c => 'toolCall' in c ? c.toolCall : null)
.filter((tc): tc is ToolCall => tc !== null),
};
}
- private parseChunk(rawChunk: SSEvent) {
+ /**
+ * Parses a raw SSE chunk and returns one or more MessageChunks
+ * May return multiple chunks when tool calls complete
+ */
+ private parseChunk(rawChunk: SSEvent): MessageChunk[] {
// console.log('Raw Chunk:', rawChunk);
if (rawChunk.data === '[DONE]') {
- return {
+ // When stream ends, flush any pending tool calls
+ const completedToolCalls = this.flushToolCalls();
+ if (completedToolCalls.length > 0) {
+ return completedToolCalls;
+ }
+ return [{
type: 'content',
content: '',
- } as const;
+ }];
}
const data = JSON.parse(rawChunk.data) as GPTResponse;
@@ -140,18 +239,104 @@ export class MessageResponse implements PromiseLike<FinalResult> {
}
const delta = choice.delta;
const finishReason = choice.finish_reason || data.finish_reason;
// Handle tool calls
if (delta.tool_calls) {
this.processToolCallDeltas(delta.tool_calls);
// If finish_reason is 'tool_calls', all tool calls are complete
if (finishReason === 'tool_calls') {
return this.flushToolCalls();
}
// Otherwise, don't yield anything yet (still accumulating)
return [];
}
// Handle reasoning chunks
if (delta.reasoning) {
- return {
+ const chunk: MessageChunk = {
type: 'reasoning',
content: delta.reasoning,
- reasoning_details: delta.reasoning_details.summary,
- } as const;
- } else {
- return {
+ };
+ // Add reasoning_details if present
+ if (delta.reasoning_details?.summary) {
+ chunk.reasoning_details = delta.reasoning_details.summary;
+ }
+ return [chunk];
+ }
+ // Handle content chunks
+ if (delta.content !== undefined && delta.content !== null) {
+ return [{
type: 'content',
content: delta.content,
- } as const;
+ }];
}
// Empty chunk (e.g., role assignment)
return [];
}
/**
* Processes tool call deltas and accumulates them
*/
private processToolCallDeltas(deltas: ToolCallDelta[]): void {
for (const delta of deltas) {
const index = delta.index;
if (!this.toolCallsInProgress.has(index)) {
// Start a new tool call
this.toolCallsInProgress.set(index, {
id: delta.id || '',
type: 'function',
function: {
name: delta.function?.name || '',
arguments: delta.function?.arguments || '',
},
});
} else {
// Accumulate arguments for existing tool call
const existing = this.toolCallsInProgress.get(index)!;
if (delta.function?.arguments) {
existing.function!.arguments += delta.function.arguments;
}
// Update other fields if provided
if (delta.id) {
existing.id = delta.id;
}
if (delta.function?.name) {
existing.function!.name = delta.function.name;
}
}
}
}
/**
* Flushes all accumulated tool calls and returns them as chunks
*/
private flushToolCalls(): MessageChunk[] {
const chunks: MessageChunk[] = [];
// Convert accumulated tool calls to chunks
for (const [index, toolCall] of this.toolCallsInProgress.entries()) {
// Validate that the tool call is complete
if (toolCall.id && toolCall.function?.name && toolCall.function?.arguments !== undefined) {
chunks.push({
type: 'tool_call',
toolCall: toolCall as ToolCall,
});
}
}
// Clear the accumulator
this.toolCallsInProgress.clear();
return chunks;
}
}
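
To make the accumulator concrete: a streamed tool call arrives as several `ToolCallDelta`s sharing one `index`, and `flushToolCalls` emits the merged result once `finish_reason` is `'tool_calls'` (or the stream hits `[DONE]`). A sketch with invented delta payloads:

```typescript
import type { ToolCall, ToolCallDelta } from './gpt-response.js';

// A single tool call typically arrives as several deltas sharing one index:
// the first carries id/type/name, the rest append fragments of the
// JSON `arguments` string.
const deltas: ToolCallDelta[] = [
  { index: 0, id: 'call_abc', type: 'function', function: { name: 'get_weather', arguments: '' } },
  { index: 0, function: { arguments: '{"location"' } },
  { index: 0, function: { arguments: ':"Paris"}' } },
];

// After the finish_reason 'tool_calls' chunk (or '[DONE]'), flushToolCalls()
// yields the merged call as a single 'tool_call' MessageChunk:
const merged: ToolCall = {
  id: 'call_abc',
  type: 'function',
  function: { name: 'get_weather', arguments: '{"location":"Paris"}' },
};

console.log(deltas.length, merged.function.arguments); // 3 {"location":"Paris"}
```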

src/gpt.ts

@@ -1,6 +1,7 @@
import { SSESession } from './sse-session.js';
import { EventEmitter } from './utils/event-emitter.js';
import { MessageResponse } from './gpt-response.js';
import type { ToolDefinition, ToolCall } from './gpt-response.js';
export type GPTEventMap = {
@@ -42,11 +43,47 @@ export type GPTConfig = {
model: string;
}
/**
* Message types that can be sent to the GPT API
*/
export type Message =
| {
role: 'user' | 'system';
content: string;
}
| {
role: 'assistant';
content: string | null;
tool_calls?: ToolCall[];
}
| {
role: 'tool';
tool_call_id: string;
name: string;
content: string;
};
/**
* Request configuration for GPT API calls
*/
export type GPTRequest = {
/**
* The messages to send to the GPT API
*/
- messages: { role: 'user' | 'assistant' | 'system'; content: string }[];
+ messages: Message[];
/**
* Optional tool definitions for function calling
*/
tools?: ToolDefinition[];
/**
* Controls which (if any) tool is called by the model
* - 'auto' (default): model decides whether to call a tool
* - 'none': model will not call any tools
* - { type: 'function', function: { name: 'tool_name' } }: forces a specific tool call
*/
tool_choice?: 'auto' | 'none' | { type: 'function'; function: { name: string } };
}
export class GPT extends EventEmitter<GPTEventMap> {
@@ -56,23 +93,36 @@ export class GPT extends EventEmitter<GPTEventMap> {
/**
* Sends a message to the GPT API
- * @param message - The message to send
+ * @param request - The request configuration including messages and optional tools
* @returns The response from the GPT API
*/
send(request: GPTRequest): MessageResponse {
const config = this.config;
const lazyIterator = (async function* () {
// Build the API request body
const requestBody: Record<string, unknown> = {
model: config.model,
messages: request.messages,
stream: true,
};
// Add tools if provided
if (request.tools && request.tools.length > 0) {
requestBody.tools = request.tools;
}
// Add tool_choice if provided
if (request.tool_choice) {
requestBody.tool_choice = request.tool_choice;
}
const session = await SSESession.from(config.apiUrl, {
headers: {
Authorization: `Bearer ${config.apiKey}`,
},
method: 'POST',
- body: JSON.stringify({
- model: config.model,
- messages: request.messages,
- stream: true,
- }),
+ body: JSON.stringify(requestBody),
});
if (!session.messages) {

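The new `Message` union is what makes a manual tool round trip expressible: the assistant turn carries `tool_calls` with `content: null`, and each result is a `role: 'tool'` message correlated by `tool_call_id`. A sketch with invented IDs and values:

```typescript
import type { Message } from './gpt.js';
import type { ToolCall } from './gpt-response.js';

const call: ToolCall = {
  id: 'call_abc',
  type: 'function',
  function: { name: 'get_weather', arguments: '{"location":"Paris","units":"celsius"}' },
};

// Conversation state after one tool round trip, ready to send back via send():
const messages: Message[] = [
  { role: 'user', content: 'What is the weather in Paris?' },
  // Assistant turn that requested the tool; content is null alongside tool_calls.
  { role: 'assistant', content: null, tool_calls: [call] },
  // Tool result, correlated by tool_call_id and stringified for the API.
  { role: 'tool', tool_call_id: 'call_abc', name: 'get_weather', content: '{"temperature":22,"condition":"sunny"}' },
];
```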
src/index.ts

@@ -1,39 +1,275 @@
/**
* Export all public types and classes
*/
export { GPT } from './gpt.js';
export type { GPTConfig, GPTRequest, Message, GPTEventMap } from './gpt.js';
export { AgenticGPT } from './agentic-gpt.js';
export type { ToolFunction, ToolRegistration, AgenticOptions } from './agentic-gpt.js';
export { MessageResponse } from './gpt-response.js';
export type {
MessageChunk,
FinalResult,
ToolDefinition,
ToolCall,
ToolCallDelta,
JSONSchema,
GPTResponse
} from './gpt-response.js';
- import { GPT } from './gpt.js';
- const gptConfig = {
- apiKey: process.env.OPENROUTER_API_KEY || '',
- apiUrl: 'https://openrouter.ai/api/v1/chat/completions',
- model: 'x-ai/grok-4.1-fast',
- }
- const gpt = new GPT(gptConfig);
- const request = gpt.send({ messages: [{ role: 'user', content: 'Hello, how are you?' }] });
- let lastChunk = { type: 'reasoning' } as { type: 'reasoning' | 'content'; reasoning?: string; reasoning_details?: string; content: string };
- for await (const chunk of request) {
- if (lastChunk.type === 'reasoning' && chunk.type === 'content') {
- process.stdout.write('\n')
- }
- process.stdout.write(chunk.content);
- lastChunk = chunk;
- }
- console.log('\n');
- console.log('--------------------------------');
- console.log('Streaming Results Completed');
- console.log('--------------------------------\n\n');
+ import { AgenticGPT } from './agentic-gpt.js';
+ import type { ToolDefinition } from './gpt-response.js';
/**
- * Generate the full response and get the final result
+ * Examples demonstrating the different usage patterns
+ * Uncomment the example you want to run
*/
- const response = await gpt.send({ messages: [{ role: 'user', content: 'Hello, how are you?' }] });
- console.log(response);
- console.log('\n');
- console.log('--------------------------------');
- console.log('Final Result Generated');
- console.log('--------------------------------\n\n');
// =============================================================================
// Example 1: Basic GPT streaming (no tools)
// =============================================================================
async function basicStreamingExample() {
const gptConfig = {
apiKey: process.env.OPENROUTER_API_KEY || '',
apiUrl: 'https://openrouter.ai/api/v1/chat/completions',
model: 'x-ai/grok-4.1-fast',
};
const gpt = new GPT(gptConfig);
console.log('=== Basic Streaming Example ===\n');
const request = gpt.send({
messages: [{ role: 'user', content: 'Tell me a short joke about programming.' }]
});
let lastChunk = { type: 'reasoning' } as { type: 'reasoning' | 'content' | 'tool_call' };
for await (const chunk of request) {
// Handle different chunk types
if (chunk.type === 'reasoning') {
process.stdout.write(chunk.content);
} else if (chunk.type === 'content') {
if (lastChunk.type === 'reasoning') {
process.stdout.write('\n');
}
process.stdout.write(chunk.content);
}
lastChunk = chunk;
}
console.log('\n\n=== Streaming Completed ===\n');
}
// =============================================================================
// Example 2: Manual tool call handling (streaming with tools)
// =============================================================================
async function manualToolHandlingExample() {
const gptConfig = {
apiKey: process.env.OPENROUTER_API_KEY || '',
apiUrl: 'https://openrouter.ai/api/v1/chat/completions',
model: 'x-ai/grok-4.1-fast',
};
const gpt = new GPT(gptConfig);
console.log('=== Manual Tool Handling Example ===\n');
// Define a weather tool
const weatherTool: ToolDefinition = {
type: 'function' as const,
function: {
name: 'get_weather',
description: 'Get the current weather for a location',
parameters: {
type: 'object',
properties: {
location: {
type: 'string',
description: 'City name, e.g., "Paris" or "New York"',
},
units: {
type: 'string',
enum: ['celsius', 'fahrenheit'],
description: 'Temperature units',
},
},
required: ['location', 'units'],
additionalProperties: false,
},
},
};
const response = gpt.send({
messages: [{ role: 'user', content: 'What is the weather like in Paris?' }],
tools: [weatherTool],
});
console.log('Streaming response:\n');
for await (const chunk of response) {
if (chunk.type === 'content' && chunk.content) {
process.stdout.write(chunk.content);
} else if (chunk.type === 'tool_call') {
console.log('\n\nTool called:', chunk.toolCall.function.name);
console.log('Arguments:', chunk.toolCall.function.arguments);
console.log('(In a real app, you would execute the tool here and continue the conversation)');
}
}
console.log('\n\n=== Manual Tool Handling Completed ===\n');
}
// =============================================================================
// Example 3: Automatic tool execution with AgenticGPT
// =============================================================================
async function agenticToolExecutionExample() {
const gptConfig = {
apiKey: process.env.OPENROUTER_API_KEY || '',
apiUrl: 'https://openrouter.ai/api/v1/chat/completions',
model: 'x-ai/grok-4.1-fast',
};
console.log('=== Agentic Tool Execution Example ===\n');
// Define tools with their execution functions
const weatherTool: ToolDefinition = {
type: 'function' as const,
function: {
name: 'get_weather',
description: 'Get the current weather for a location',
parameters: {
type: 'object',
properties: {
location: {
type: 'string',
description: 'City name',
},
units: {
type: 'string',
enum: ['celsius', 'fahrenheit'],
description: 'Temperature units',
},
},
required: ['location', 'units'],
additionalProperties: false,
},
},
};
// Create weather function
const getWeather = async (args: Record<string, unknown>) => {
const location = args.location as string;
const units = args.units as string;
console.log(`\n[Executing get_weather(${location}, ${units})]`);
// Simulate API call
await new Promise(resolve => setTimeout(resolve, 500));
return {
location,
temperature: units === 'celsius' ? 22 : 72,
condition: 'sunny',
units,
};
};
// Create AgenticGPT with tools
const agenticGPT = new AgenticGPT(gptConfig, [
{ definition: weatherTool, fn: getWeather }
]);
// Listen for tool execution events
agenticGPT.on('toolCalled', (event) => {
console.log(`\n[Tool executed: ${event.toolName}]`);
console.log('[Result:', JSON.stringify(event.result), ']');
});
console.log('User: What is the weather in Paris and London?\n');
console.log('Assistant: ');
// Send request with automatic tool execution
for await (const chunk of agenticGPT.sendWithTools({
messages: [{
role: 'user',
content: 'What is the weather in Paris and London? Use celsius for Paris and fahrenheit for London.'
}],
})) {
if (chunk.type === 'content' && chunk.content) {
process.stdout.write(chunk.content);
} else if (chunk.type === 'tool_call') {
console.log(`\n[Calling tool: ${chunk.toolCall.function.name}]`);
}
}
console.log('\n\n=== Agentic Tool Execution Completed ===\n');
}
// =============================================================================
// Example 4: Thenable pattern with tool history
// =============================================================================
async function thenableWithToolHistoryExample() {
const gptConfig = {
apiKey: process.env.OPENROUTER_API_KEY || '',
apiUrl: 'https://openrouter.ai/api/v1/chat/completions',
model: 'x-ai/grok-4.1-fast',
};
console.log('=== Thenable Pattern with Tool History Example ===\n');
const calculatorTool: ToolDefinition = {
type: 'function' as const,
function: {
name: 'calculate',
description: 'Perform mathematical calculations',
parameters: {
type: 'object',
properties: {
expression: {
type: 'string',
description: 'Mathematical expression to evaluate, e.g., "2 + 2"',
},
},
required: ['expression'],
additionalProperties: false,
},
},
};
// For this example, we'll use the base GPT class with thenable pattern
const gpt = new GPT(gptConfig);
console.log('Requesting response without streaming...\n');
// Use thenable pattern (await the response directly)
// This will consume the stream automatically and return the final result
const result = await gpt.send({
messages: [{
role: 'user',
content: 'Use the calculator tool to calculate 2 + 2'
}],
tools: [calculatorTool],
});
console.log('\n=== Final Result ===');
console.log('Content:', result.content);
console.log('Reasoning:', result.reasoning || '(none)');
console.log('\nTool Calls Made:', result.toolCalls.length);
result.toolCalls.forEach((tc, i) => {
console.log(` ${i + 1}. ${tc.function.name}(${tc.function.arguments})`);
});
console.log('\n=== Thenable Example Completed ===\n');
}
// =============================================================================
// Run examples
// =============================================================================
// All four examples run in sequence; comment out any you want to skip:
await basicStreamingExample();
await manualToolHandlingExample();
await agenticToolExecutionExample();
await thenableWithToolHistoryExample();
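
One path these examples do not exercise is a tool that throws. `sendWithTools` serializes the failure as a `{"error": ...}` tool message and keeps looping, so the model can explain or recover in its final answer. A sketch in the same style as the examples above, reusing the file's imports (`flaky_lookup` is an invented tool):

```typescript
async function toolErrorHandlingExample() {
  const gptConfig = {
    apiKey: process.env.OPENROUTER_API_KEY || '',
    apiUrl: 'https://openrouter.ai/api/v1/chat/completions',
    model: 'x-ai/grok-4.1-fast',
  };
  const flakyTool: ToolDefinition = {
    type: 'function' as const,
    function: {
      name: 'flaky_lookup',
      description: 'A lookup that always fails, for demonstration',
      parameters: { type: 'object', properties: {}, additionalProperties: false },
    },
  };
  const agent = new AgenticGPT(gptConfig, [
    {
      definition: flakyTool,
      // The thrown error becomes a role:'tool' message containing
      // {"error":"upstream service unavailable"} and the loop continues.
      fn: async () => { throw new Error('upstream service unavailable'); },
    },
  ]);
  for await (const chunk of agent.sendWithTools({
    messages: [{ role: 'user', content: 'Use flaky_lookup and tell me what happened.' }],
  })) {
    if (chunk.type === 'content' && chunk.content) {
      process.stdout.write(chunk.content);
    }
  }
  console.log();
}

// await toolErrorHandlingExample();
```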