Merge pull request 'agentic-gpt' (#1) from agentic-gpt into main
Reviewed-on: #1
157 src/gpt-response.ts
@@ -1,157 +0,0 @@
import type { SSEvent } from './sse-session.js';

export type MessageChunk = {
  type: 'reasoning' | 'content';
  reasoning_details?: string;
  content: string;
}

export type FinalResult = {
  reasoning: string;
  content: string;
}

export type GPTResponse = {
  id: string;
  provider: string;
  model: string;
  object: string;
  created: number;
  choices: {
    index: number;
    delta: {
      role: 'user' | 'assistant' | 'system';
      content: string;
      reasoning: string;
      reasoning_details: {
        type: string;
        summary: string;
      }
    };
  }[];
  finish_reason: 'stop' | 'tool_calls' | 'length' | 'content_filter' | null;
  native_finish_reason: string | null;
  usage: {
    prompt_tokens: number;
    completion_tokens: number;
    total_tokens: number;
    cost: number;
    is_byok: boolean;
    prompt_tokens_details: {
      cached_tokens: number;
    };
    cost_details: {
      upstream_inference_cost: number;
      upstream_prompt_cost: number;
      upstream_inference_completions_cost: number;
    },
    completion_tokens_details: {
      reasoning_tokens: number;
    }
  };
}

export class MessageResponse implements PromiseLike<FinalResult> {
  private chunks: MessageChunk[] = [];
  private iteratorConsumed = false;
  private resolveResult!: (value: FinalResult) => void;
  private resultPromise: Promise<FinalResult>;
  private iterator: AsyncIterable<SSEvent>;

  constructor(iterator: AsyncIterable<SSEvent>) {
    this.iterator = iterator;
    this.resultPromise = new Promise(resolve => {
      this.resolveResult = resolve;
    });
  }

  async *[Symbol.asyncIterator]() {
    if (this.iteratorConsumed) {
      throw new Error('GPTResponse can only be iterated once');
    }

    this.iteratorConsumed = true;

    for await (const rawChunk of this.iterator) {
      const chunk = this.parseChunk(rawChunk);
      this.chunks.push(chunk);
      yield chunk;
    }

    this.resolveResult(this.buildResult());
  }

  then<TResult1 = FinalResult, TResult2 = never>(
    onfulfilled?: ((value: FinalResult) => TResult1 | PromiseLike<TResult1>) | null,
    onrejected?: ((reason: unknown) => TResult2 | PromiseLike<TResult2>) | null,
  ): Promise<TResult1 | TResult2> {
    // If not yet iterated, consume the iterator to get the result
    if (!this.iteratorConsumed) {
      this.iteratorConsumed = true;

      (async () => {
        for await (const rawChunk of this.iterator) {
          const chunk = this.parseChunk(rawChunk);
          this.chunks.push(chunk);
        }

        this.resolveResult(this.buildResult());
      })();
    }

    return this.resultPromise.then(onfulfilled, onrejected);
  }

  catch(onrejected?: ((reason: unknown) => never) | null): Promise<FinalResult> {
    return this.resultPromise.catch(onrejected);
  }

  finally(onfinally?: (() => void) | undefined): Promise<FinalResult> {
    return this.resultPromise.finally(onfinally);
  }

  private buildResult(): FinalResult {
    return {
      reasoning: this.chunks
        .filter(c => c.type === 'reasoning')
        .map(c => c.content)
        .join(''),
      content: this.chunks
        .filter(c => c.type === 'content')
        .map(c => c.content)
        .join(''),
    };
  }

  private parseChunk(rawChunk: SSEvent) {
    // console.log('Raw Chunk:', rawChunk);
    if (rawChunk.data === '[DONE]') {
      return {
        type: 'content',
        content: '',
      } as const;
    }

    const data = JSON.parse(rawChunk.data) as GPTResponse;
    const choice = data.choices[0];

    if (!choice) {
      throw new Error('No choice found in chunk');
    }

    const delta = choice.delta;

    if (delta.reasoning) {
      return {
        type: 'reasoning',
        content: delta.reasoning,
        reasoning_details: delta.reasoning_details.summary,
      } as const;
    } else {
      return {
        type: 'content',
        content: delta.content,
      } as const;
    }
  }
}
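A minimal sketch (not part of this diff) of the two consumption modes the deleted MessageResponse supported; `events` stands in for any AsyncIterable<SSEvent>, and in practice each instance would need its own event stream:

```typescript
// Sketch: dual consumption of MessageResponse.
declare const events: AsyncIterable<SSEvent>;

// 1. Stream chunks as they arrive
const streamed = new MessageResponse(events);
for await (const chunk of streamed) {
  if (chunk.type === 'content') process.stdout.write(chunk.content);
}

// 2. Or await the instance directly to get the aggregated FinalResult
const result: FinalResult = await new MessageResponse(events);
console.log(result.reasoning, result.content);
```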
87 src/gpt.ts
@@ -1,87 +0,0 @@
import { SSESession } from './sse-session.js';
import { EventEmitter } from './utils/event-emitter.js';
import { MessageResponse } from './gpt-response.js';


export type GPTEventMap = {
  /**
   * Emitted when a message is sent
   */
  messageSent: { mesasge: string };

  /**
   * Emitted when a message chunk is received
   */
  messageChunkReceived: { chunk: string };

  /**
   * Emitted when a response is received
   */
  responseReceived: { response: string };

  /**
   * Emitted when a tool is called
   */
  toolCalled: { toolName: string; arguments: Record<string, unknown>; result: unknown };
}

export type GPTConfig = {
  /**
   * The API key to use for the GPT API
   */
  apiKey: string;

  /**
   * The API URL to use for the GPT API
   */
  apiUrl: string;

  /**
   * The model to use for the GPT API
   */
  model: string;
}

export type GPTRequest = {
  /**
   * The messages to send to the GPT API
   */
  messages: { role: 'user' | 'assistant' | 'system'; content: string }[];
}

export class GPT extends EventEmitter<GPTEventMap> {
  constructor(public config: GPTConfig) {
    super();
  }

  /**
   * Sends a message to the GPT API
   * @param message - The message to send
   * @returns The response from the GPT API
   */
  send(request: GPTRequest): MessageResponse {
    const config = this.config;

    const lazyIterator = (async function* () {
      const session = await SSESession.from(config.apiUrl, {
        headers: {
          Authorization: `Bearer ${config.apiKey}`,
        },
        method: 'POST',
        body: JSON.stringify({
          model: config.model,
          messages: request.messages,
          stream: true,
        }),
      });

      if (!session.messages) {
        throw new Error('Failed to create SSE session');
      }

      yield* session.messages;
    })();

    return new MessageResponse(lazyIterator);
  }
}
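The lazy async generator inside send() means no SSE session is opened until the response is first iterated or awaited. A generic illustration of that deferral (not from this diff):

```typescript
// Illustration: an async generator's body does not run until the consumer
// requests the first value, which is why send() can return a response object
// immediately and only open the SSE session once iteration (or await) begins.
const lazy = (async function* () {
  console.log('opening connection...'); // not printed until iteration starts
  yield 'first chunk';
})();

console.log('nothing has happened yet');

for await (const chunk of lazy) {
  console.log('received:', chunk);
}
```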
317 src/gpt/agentic-gpt.ts (new file)
@@ -0,0 +1,317 @@
import { GPT } from './gpt.js';
import type { GPTConfig, GPTRequest, Message } from './gpt.js';
import type { ToolDefinition, ToolCall, MessageChunk } from './gpt-response.js';

/**
 * Function signature for tool execution
 */
export type ToolFunction = (args: Record<string, unknown>) => Promise<unknown> | unknown;

/**
 * Registration entry for a tool with its definition and execution function
 */
export type ToolRegistration = {
  /**
   * OpenAI-format tool definition
   */
  definition: ToolDefinition;

  /**
   * Function to execute when the tool is called
   */
  fn: ToolFunction;
};

/**
 * Options for agentic tool execution
 */
export type AgenticOptions = {
  /**
   * Maximum number of tool execution loops to prevent infinite loops
   * @default 10
   */
  maxLoops?: number;

  /**
   * Whether to emit events for tool calls
   * @default true
   */
  emitEvents?: boolean;
};

/**
 * AgenticGPT extends the base GPT class to provide automatic tool execution.
 *
 * This class:
 * - Registers tools with their execution functions
 * - Automatically executes tools when the LLM requests them
 * - Loops until the LLM provides a final response without tool calls
 * - Yields all chunks (content, reasoning, tool calls) during iteration
 * - Emits events for monitoring tool execution
 *
 * @example
 * ```typescript
 * const agenticGPT = new AgenticGPT(config, [
 *   {
 *     definition: {
 *       type: 'function',
 *       function: {
 *         name: 'get_weather',
 *         description: 'Get current weather for a location',
 *         parameters: {
 *           type: 'object',
 *           properties: {
 *             location: { type: 'string', description: 'City name' }
 *           },
 *           required: ['location']
 *         }
 *       }
 *     },
 *     fn: async (args) => {
 *       return { temperature: 22, condition: 'sunny' };
 *     }
 *   }
 * ]);
 *
 * // Automatic tool execution with streaming
 * for await (const chunk of agenticGPT.sendWithTools({
 *   messages: [{ role: 'user', content: 'What is the weather in Paris?' }]
 * })) {
 *   console.log(chunk);
 * }
 * ```
 */
export class AgenticGPT extends GPT {
  /**
   * Registry mapping tool names to their execution functions
   */
  private toolRegistry: Map<string, ToolFunction> = new Map();

  /**
   * Tool definitions to send with requests
   */
  private toolDefinitions: ToolDefinition[] = [];

  /**
   * Default options for agentic execution
   */
  private options: Required<AgenticOptions>;

  /**
   * Creates a new AgenticGPT instance
   *
   * @param config - GPT configuration (API key, URL, model)
   * @param tools - Array of tool registrations with definitions and execution functions
   * @param options - Optional configuration for agentic behavior
   */
  constructor(
    config: GPTConfig,
    tools: ToolRegistration[] = [],
    options: AgenticOptions = {}
  ) {
    super(config);

    // Set default options
    this.options = {
      maxLoops: options.maxLoops ?? 10,
      emitEvents: options.emitEvents ?? true,
    };

    // Register tools
    for (const tool of tools) {
      this.registerTool(tool);
    }
  }

  /**
   * Registers a tool with its definition and execution function
   *
   * @param tool - Tool registration with definition and execution function
   */
  registerTool(tool: ToolRegistration): void {
    const toolName = tool.definition.function.name;
    this.toolRegistry.set(toolName, tool.fn);
    this.toolDefinitions.push(tool.definition);
  }

  /**
   * Unregisters a tool by name
   *
   * @param toolName - Name of the tool to unregister
   */
  unregisterTool(toolName: string): void {
    this.toolRegistry.delete(toolName);
    this.toolDefinitions = this.toolDefinitions.filter(
      def => def.function.name !== toolName
    );
  }

  /**
   * Sends a request with automatic tool execution.
   *
   * This method:
   * 1. Sends the request to the GPT API with registered tools
   * 2. Yields all chunks (content, reasoning, tool calls) as they arrive
   * 3. When tool calls are detected, executes them automatically
   * 4. Adds tool results to the conversation and continues
   * 5. Repeats until the LLM provides a final response or max loops reached
   *
   * @param request - GPT request with messages and optional tool configuration
   * @param options - Optional overrides for agentic options
   * @returns Async iterator of message chunks
   *
   * @example
   * ```typescript
   * for await (const chunk of agenticGPT.sendWithTools({
   *   messages: [{ role: 'user', content: 'Tell me a joke and the weather' }]
   * })) {
   *   if (chunk.type === 'content') {
   *     process.stdout.write(chunk.content);
   *   } else if (chunk.type === 'tool_call') {
   *     console.log('Calling tool:', chunk.toolCall.function.name);
   *   }
   * }
   * ```
   */
  async *sendWithTools(
    request: GPTRequest,
    options?: AgenticOptions
  ): AsyncIterableIterator<MessageChunk> {
    // Merge options
    const effectiveOptions: Required<AgenticOptions> = {
      maxLoops: options?.maxLoops ?? this.options.maxLoops,
      emitEvents: options?.emitEvents ?? this.options.emitEvents,
    };

    // Start with the initial messages
    const messages: Message[] = [...request.messages];
    let loopCount = 0;
    let continueLoop = true;

    while (continueLoop && loopCount < effectiveOptions.maxLoops) {
      loopCount++;

      // Build the request with tools
      const gptRequest: GPTRequest = {
        messages,
        tools: request.tools || this.toolDefinitions,
        tool_choice: request.tool_choice || 'auto',
      };

      // Send to GPT and collect tool calls
      const response = this.send(gptRequest);
      const toolCalls: ToolCall[] = [];

      // Stream and collect chunks
      for await (const chunk of response) {
        yield chunk;

        if (chunk.type === 'tool_call') {
          toolCalls.push(chunk.toolCall);
        }
      }

      // If no tool calls, we're done
      if (toolCalls.length === 0) {
        continueLoop = false;
        break;
      }

      // Add the assistant message with tool calls to the conversation
      messages.push({
        role: 'assistant',
        content: null, // Content is null when tool calls are present
        tool_calls: toolCalls,
      });

      // Execute tools and add results to messages
      for (const toolCall of toolCalls) {
        try {
          const result = await this.executeTool(toolCall);

          // Add tool result to messages
          messages.push({
            role: 'tool',
            tool_call_id: toolCall.id,
            name: toolCall.function.name,
            content: typeof result === 'string' ? result : JSON.stringify(result),
          });

          // Emit event if enabled
          if (effectiveOptions.emitEvents) {
            this.emit('toolCalled', {
              toolName: toolCall.function.name,
              arguments: JSON.parse(toolCall.function.arguments),
              result,
            });
          }
        } catch (error) {
          // If tool execution fails, add error message
          const errorMessage = error instanceof Error ? error.message : String(error);

          messages.push({
            role: 'tool',
            tool_call_id: toolCall.id,
            name: toolCall.function.name,
            content: JSON.stringify({ error: errorMessage }),
          });

          console.error(`Error executing tool ${toolCall.function.name}:`, error);
        }
      }
    }

    // Check if we hit max loops
    if (loopCount >= effectiveOptions.maxLoops) {
      console.warn(`AgenticGPT: Reached maximum loop count (${effectiveOptions.maxLoops})`);
    }
  }

  /**
   * Executes a tool call with the provided arguments
   *
   * @param toolCall - The tool call to execute
   * @returns The result of the tool execution
   * @throws Error if the tool is not found or execution fails
   */
  private async executeTool(toolCall: ToolCall): Promise<unknown> {
    const toolName = toolCall.function.name;
    const fn = this.toolRegistry.get(toolName);

    if (!fn) {
      throw new Error(`Tool "${toolName}" not found in registry`);
    }

    let args: Record<string, unknown>;

    try {
      args = JSON.parse(toolCall.function.arguments);
    } catch {
      throw new Error(`Invalid JSON arguments for tool "${toolName}": ${toolCall.function.arguments}`);
    }

    return await fn(args);
  }

  /**
   * Gets all registered tool definitions
   *
   * @returns Array of tool definitions
   */
  getToolDefinitions(): ToolDefinition[] {
    return [...this.toolDefinitions];
  }

  /**
   * Checks if a tool is registered
   *
   * @param toolName - Name of the tool to check
   * @returns True if the tool is registered
   */
  hasTool(toolName: string): boolean {
    return this.toolRegistry.has(toolName);
  }
}
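A sketch of runtime registry management using only the methods defined above (registerTool, hasTool, getToolDefinitions, unregisterTool); the get_time tool is hypothetical, and the config values are reused from the examples later in this diff:

```typescript
// Sketch: managing the tool registry at runtime. The get_time tool is made up.
const agent = new AgenticGPT({
  apiKey: process.env.OPENROUTER_API_KEY || '',
  apiUrl: 'https://openrouter.ai/api/v1/chat/completions',
  model: 'x-ai/grok-4.1-fast',
});

agent.registerTool({
  definition: {
    type: 'function',
    function: {
      name: 'get_time',
      description: 'Get the current time as an ISO string',
      parameters: { type: 'object', properties: {}, additionalProperties: false },
    },
  },
  // Tools may be sync or async; the result is serialized into a 'tool' message
  fn: () => new Date().toISOString(),
});

console.log(agent.hasTool('get_time'));         // true
console.log(agent.getToolDefinitions().length); // 1

agent.unregisterTool('get_time');
console.log(agent.hasTool('get_time'));         // false
```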
297 src/gpt/gpt-response.ts (new file)
@@ -0,0 +1,297 @@
import type { SSEvent } from '../utils/sse-session.js';

import { ThenableIterator } from '../utils/thenable-iterator.js';

/**
 * JSON Schema type for tool parameter definitions
 */
export type JSONSchema = {
  type: 'object' | 'string' | 'number' | 'boolean' | 'array' | 'null';
  properties?: Record<string, JSONSchema>;
  items?: JSONSchema;
  required?: string[];
  enum?: (string | number)[];
  description?: string;
  additionalProperties?: boolean;
  [key: string]: unknown;
};

/**
 * OpenAI-format tool definition using JSON Schema
 */
export type ToolDefinition = {
  type: 'function';
  function: {
    name: string;
    description: string;
    parameters: JSONSchema;
    strict?: boolean;
  };
};

/**
 * Represents a complete tool call from the LLM
 */
export type ToolCall = {
  id: string;
  type: 'function';
  function: {
    name: string;
    arguments: string; // JSON string
  };
};

/**
 * Delta format for streaming tool calls
 */
export type ToolCallDelta = {
  index: number;
  id?: string;
  type?: 'function';
  function?: {
    name?: string;
    arguments?: string;
  };
};

/**
 * Message chunk types that can be yielded during streaming
 */
export type MessageChunk =
  | {
      type: 'reasoning';
      reasoning_details?: string;
      content: string;
    }
  | {
      type: 'content';
      content: string;
    }
  | {
      type: 'tool_call';
      toolCall: ToolCall;
    };

/**
 * Final result after consuming all chunks
 */
export type FinalResult = {
  reasoning: string;
  content: string;
  toolCalls: ToolCall[];
}

export type GPTResponse = {
  id: string;
  provider: string;
  model: string;
  object: string;
  created: number;
  choices: {
    index: number;
    delta: {
      role?: 'user' | 'assistant' | 'system';
      content?: string | null;
      reasoning?: string;
      reasoning_details?: {
        type: string;
        summary: string;
      };
      tool_calls?: ToolCallDelta[];
    };
    finish_reason?: 'stop' | 'tool_calls' | 'length' | 'content_filter' | null;
  }[];
  finish_reason?: 'stop' | 'tool_calls' | 'length' | 'content_filter' | null;
  native_finish_reason?: string | null;
  usage?: {
    prompt_tokens: number;
    completion_tokens: number;
    total_tokens: number;
    cost: number;
    is_byok: boolean;
    prompt_tokens_details: {
      cached_tokens: number;
    };
    cost_details: {
      upstream_inference_cost: number;
      upstream_prompt_cost: number;
      upstream_inference_completions_cost: number;
    },
    completion_tokens_details: {
      reasoning_tokens: number;
    }
  };
}

export class GPTResponseIterator extends ThenableIterator<SSEvent, MessageChunk, FinalResult> {
  /**
   * Accumulates tool calls as they stream in from the API
   * Key is the tool call index, value is the partially completed tool call
   */
  private toolCallsInProgress: Map<number, Partial<ToolCall>> = new Map();

  constructor(iterator: AsyncIterable<SSEvent>) {
    super(iterator);
  }

  /**
   * Parses a raw SSE chunk and returns one or more MessageChunks
   * May return multiple chunks when tool calls complete
   */
  parseChunk(rawChunk: SSEvent): MessageChunk[] {
    // console.log('Raw Chunk:', rawChunk);
    if (rawChunk.data === '[DONE]') {
      // When stream ends, flush any pending tool calls
      const completedToolCalls = this.flushToolCalls();

      // If there are completed tool calls, return them
      if (completedToolCalls.length > 0) {
        return completedToolCalls;
      }

      // Otherwise, return an empty content chunk so we don't crash trying to parse invalid data
      return [{
        type: 'content',
        content: '',
      }];
    }

    // Parse the chunk data as a GPTResponse
    const data = JSON.parse(rawChunk.data) as GPTResponse;
    // Get the first choice
    const choice = data.choices[0];

    // If no choice found, throw an error
    if (!choice) {
      throw new Error('No choice found in chunk');
    }

    // Get the delta from the choice
    const delta = choice.delta;
    // Get the finish reason from the choice or the response
    const finishReason = choice.finish_reason || data.finish_reason;

    // Handle tool calls
    if (delta.tool_calls) {
      // Process the tool call deltas
      this.processToolCallDeltas(delta.tool_calls);

      // If finish_reason is 'tool_calls', all tool calls are complete
      if (finishReason === 'tool_calls') {
        // If all tool calls are complete, flush them and return them as chunks
        return this.flushToolCalls();
      }

      // Otherwise, don't yield anything yet (still accumulating)
      return [];
    }

    // Handle reasoning chunks
    if (delta.reasoning) {
      const chunk: MessageChunk = {
        type: 'reasoning',
        content: delta.reasoning,
      };

      // Add reasoning_details if present
      if (delta.reasoning_details?.summary) {
        chunk.reasoning_details = delta.reasoning_details.summary;
      }

      return [chunk];
    }

    // Handle content chunks
    if (delta.content !== undefined && delta.content !== null) {
      return [{
        type: 'content',
        content: delta.content,
      }];
    }

    // Empty chunk (e.g., role assignment)
    return [];
  }

  /**
   * Parses a final result from the output chunks
   *
   * @param chunks - The output chunks to parse
   * @returns The parsed final result
   */
  parseFinal(chunks: MessageChunk[]): FinalResult {
    return {
      reasoning: chunks
        .filter(c => c.type === 'reasoning')
        .map(c => 'content' in c ? c.content : '')
        .join(''),
      content: chunks
        .filter(c => c.type === 'content')
        .map(c => 'content' in c ? c.content : '')
        .join(''),
      toolCalls: chunks
        .filter(c => c.type === 'tool_call')
        .map(c => 'toolCall' in c ? c.toolCall : null)
        .filter((tc): tc is ToolCall => tc !== null),
    };
  }

  /**
   * Processes tool call deltas and accumulates them
   */
  private processToolCallDeltas(deltas: ToolCallDelta[]): void {
    for (const delta of deltas) {
      const index = delta.index;

      if (!this.toolCallsInProgress.has(index)) {
        // Start a new tool call
        this.toolCallsInProgress.set(index, {
          id: delta.id || '',
          type: 'function',
          function: {
            name: delta.function?.name || '',
            arguments: delta.function?.arguments || '',
          },
        });
      } else {
        // Accumulate arguments for existing tool call
        const existing = this.toolCallsInProgress.get(index)!;

        if (delta.function?.arguments) {
          existing.function!.arguments += delta.function.arguments;
        }

        // Update other fields if provided
        if (delta.id) {
          existing.id = delta.id;
        }
        if (delta.function?.name) {
          existing.function!.name = delta.function.name;
        }
      }
    }
  }

  /**
   * Flushes all accumulated tool calls and returns them as chunks
   */
  private flushToolCalls(): MessageChunk[] {
    const chunks: MessageChunk[] = [];

    // Convert accumulated tool calls to chunks
    for (const toolCall of this.toolCallsInProgress.values()) {
      // Validate that the tool call is complete
      if (toolCall.id && toolCall.function?.name && toolCall.function?.arguments !== undefined) {
        chunks.push({
          type: 'tool_call',
          toolCall: toolCall as ToolCall,
        });
      }
    }

    // Clear the accumulator
    this.toolCallsInProgress.clear();

    return chunks;
  }
}
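An illustration, with hypothetical delta values, of how processToolCallDeltas and flushToolCalls stitch streamed ToolCallDelta fragments into a single ToolCall once the stream reports finish_reason === 'tool_calls':

```typescript
// Illustration: the arguments string for a tool call arrives in pieces and is
// concatenated per `index` until the accumulator is flushed.
const streamedDeltas: ToolCallDelta[] = [
  { index: 0, id: 'call_abc', type: 'function', function: { name: 'get_weather', arguments: '' } },
  { index: 0, function: { arguments: '{"location":' } },
  { index: 0, function: { arguments: '"Paris"}' } },
];

// After processing the deltas above, flushToolCalls() would yield one chunk equivalent to:
const flushed: MessageChunk = {
  type: 'tool_call',
  toolCall: {
    id: 'call_abc',
    type: 'function',
    function: { name: 'get_weather', arguments: '{"location":"Paris"}' },
  },
};
```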
136 src/gpt/gpt.ts (new file)
@@ -0,0 +1,136 @@
import { SSESession } from '../utils/sse-session.js';
import { EventEmitter } from '../utils/event-emitter.js';
import { GPTResponseIterator, type ToolCall, type ToolDefinition } from './gpt-response.js';


export type GPTEventMap = {
  /**
   * Emitted when a message is sent
   */
  messageSent: { message: string };

  /**
   * Emitted when a message chunk is received
   */
  messageChunkReceived: { chunk: string };

  /**
   * Emitted when a response is received
   */
  responseReceived: { response: string };

  /**
   * Emitted when a tool is called
   */
  toolCalled: { toolName: string; arguments: Record<string, unknown>; result: unknown };
}

export type GPTConfig = {
  /**
   * The API key to use for the GPT API
   */
  apiKey: string;

  /**
   * The API URL to use for the GPT API
   */
  apiUrl: string;

  /**
   * The model to use for the GPT API
   */
  model: string;
}

/**
 * Message types that can be sent to the GPT API
 */
export type Message =
  | {
      role: 'user' | 'system';
      content: string;
    }
  | {
      role: 'assistant';
      content: string | null;
      tool_calls?: ToolCall[];
    }
  | {
      role: 'tool';
      tool_call_id: string;
      name: string;
      content: string;
    };

/**
 * Request configuration for GPT API calls
 */
export type GPTRequest = {
  /**
   * The messages to send to the GPT API
   */
  messages: Message[];

  /**
   * Optional tool definitions for function calling
   */
  tools?: ToolDefinition[];

  /**
   * Controls which (if any) tool is called by the model
   * - 'auto' (default): model decides whether to call a tool
   * - 'none': model will not call any tools
   * - { type: 'function', function: { name: 'tool_name' } }: forces a specific tool call
   */
  tool_choice?: 'auto' | 'none' | { type: 'function'; function: { name: string } };
}

export class GPT extends EventEmitter<GPTEventMap> {
  constructor(public config: GPTConfig) {
    super();
  }

  /**
   * Sends a request to the GPT API
   * @param request - The request containing messages and optional tool configuration
   * @returns A streaming, awaitable response iterator from the GPT API
   */
  send(request: GPTRequest): GPTResponseIterator {
    const config = this.config;

    const lazyIterator = (async function* () {
      // Build the API request body
      const requestBody: Record<string, unknown> = {
        model: config.model,
        messages: request.messages,
        stream: true,
      };

      // Add tools if provided
      if (request.tools && request.tools.length > 0) {
        requestBody.tools = request.tools;
      }

      // Add tool_choice if provided
      if (request.tool_choice) {
        requestBody.tool_choice = request.tool_choice;
      }

      const session = await SSESession.from(config.apiUrl, {
        headers: {
          Authorization: `Bearer ${config.apiKey}`,
        },
        method: 'POST',
        body: JSON.stringify(requestBody),
      });

      if (!session.messages) {
        throw new Error('Failed to create SSE session');
      }

      yield* session.messages;
    })();

    return new GPTResponseIterator(lazyIterator);
  }
}
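A sketch of the three message shapes the Message union allows, mirroring the conversation AgenticGPT builds after executing a tool call; the call id and values are hypothetical:

```typescript
// Sketch: a user turn, an assistant turn that requests a tool, and the tool result.
const conversation: Message[] = [
  { role: 'user', content: 'What is the weather in Paris?' },
  {
    role: 'assistant',
    content: null, // content is null when the model answers with tool calls
    tool_calls: [{
      id: 'call_abc',
      type: 'function',
      function: { name: 'get_weather', arguments: '{"location":"Paris","units":"celsius"}' },
    }],
  },
  {
    role: 'tool',
    tool_call_id: 'call_abc',
    name: 'get_weather',
    content: JSON.stringify({ temperature: 22, condition: 'sunny' }),
  },
];

// The follow-up request simply continues with the extended message list
const followUp: GPTRequest = { messages: conversation, tool_choice: 'auto' };
```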
308 src/index.ts
@@ -1,39 +1,283 @@
--- removed (old src/index.ts) ---

import { GPT } from './gpt.js';

const gptConfig = {
  apiKey: process.env.OPENROUTER_API_KEY || '',
  apiUrl: 'https://openrouter.ai/api/v1/chat/completions',
  model: 'x-ai/grok-4.1-fast',
}

const gpt = new GPT(gptConfig);

const request = gpt.send({ messages: [{ role: 'user', content: 'Hello, how are you?' }] });

let lastChunk = { type: 'reasoning' } as { type: 'reasoning' | 'content'; reasoning?: string; reasoning_details?: string; content: string };

for await (const chunk of request) {
  if (lastChunk.type === 'reasoning' && chunk.type === 'content') {
    process.stdout.write('\n')
  }

  process.stdout.write(chunk.content);

  lastChunk = chunk;
}

console.log('\n');
console.log('--------------------------------');
console.log('Streaming Results Completed');
console.log('--------------------------------\n\n');

/**
 * Generate the full response and get the final result
 */
const response = await gpt.send({ messages: [{ role: 'user', content: 'Hello, how are you?' }] });
console.log(response);

console.log('\n');
console.log('--------------------------------');
console.log('Final Result Generated');
console.log('--------------------------------\n\n');

+++ added (new src/index.ts) +++

export {
  GPT,
  type GPTConfig,
  type GPTRequest,
  type Message,
  type GPTEventMap,
} from './gpt/gpt.js'

export {
  AgenticGPT,
  type ToolFunction,
  type ToolRegistration,
  type AgenticOptions,
} from './gpt/agentic-gpt.js';

export {
  GPTResponseIterator,
  type MessageChunk,
  type FinalResult,
  type ToolDefinition,
  type ToolCall,
  type ToolCallDelta,
  type JSONSchema,
  type GPTResponse,
} from './gpt/gpt-response.js';

import { GPT } from './gpt/gpt.js';
import { AgenticGPT } from './gpt/agentic-gpt.js';
import type { ToolDefinition } from './gpt/gpt-response.js';

/**
 * Examples demonstrating the different usage patterns
 * Uncomment the example you want to run
 */

// =============================================================================
// Example 1: Basic GPT streaming (no tools)
// =============================================================================
async function basicStreamingExample() {

  const gptConfig = {
    apiKey: process.env.OPENROUTER_API_KEY || '',
    apiUrl: 'https://openrouter.ai/api/v1/chat/completions',
    model: 'x-ai/grok-4.1-fast',
  };

  const gpt = new GPT(gptConfig);

  console.log('=== Basic Streaming Example ===\n');

  const request = gpt.send({
    messages: [{ role: 'user', content: 'Tell me a short joke about programming.' }]
  });

  let lastChunk = { type: 'reasoning' } as { type: 'reasoning' | 'content' | 'tool_call' };

  for await (const chunk of request) {
    // Handle different chunk types
    if (chunk.type === 'reasoning') {
      process.stdout.write(chunk.content);
    } else if (chunk.type === 'content') {
      if (lastChunk.type === 'reasoning') {
        process.stdout.write('\n');
      }
      process.stdout.write(chunk.content);
    }

    lastChunk = chunk;
  }

  console.log('\n\n=== Streaming Completed ===\n');
}

// =============================================================================
// Example 2: Manual tool call handling (streaming with tools)
// =============================================================================
async function manualToolHandlingExample() {
  const gptConfig = {
    apiKey: process.env.OPENROUTER_API_KEY || '',
    apiUrl: 'https://openrouter.ai/api/v1/chat/completions',
    model: 'x-ai/grok-4.1-fast',
  };

  const gpt = new GPT(gptConfig);

  console.log('=== Manual Tool Handling Example ===\n');

  // Define a weather tool
  const weatherTool: ToolDefinition = {
    type: 'function' as const,
    function: {
      name: 'get_weather',
      description: 'Get the current weather for a location',
      parameters: {
        type: 'object',
        properties: {
          location: {
            type: 'string',
            description: 'City name, e.g., "Paris" or "New York"',
          },
          units: {
            type: 'string',
            enum: ['celsius', 'fahrenheit'],
            description: 'Temperature units',
          },
        },
        required: ['location', 'units'],
        additionalProperties: false,
      },
    },
  };

  const response = gpt.send({
    messages: [{ role: 'user', content: 'What is the weather like in Paris?' }],
    tools: [weatherTool],
  });

  console.log('Streaming response:\n');

  for await (const chunk of response) {
    if (chunk.type === 'content' && chunk.content) {
      process.stdout.write(chunk.content);
    } else if (chunk.type === 'tool_call') {
      console.log('\n\nTool called:', chunk.toolCall.function.name);
      console.log('Arguments:', chunk.toolCall.function.arguments);
      console.log('(In a real app, you would execute the tool here and continue the conversation)');
    }
  }

  console.log('\n\n=== Manual Tool Handling Completed ===\n');
}

// =============================================================================
// Example 3: Automatic tool execution with AgenticGPT
// =============================================================================
async function agenticToolExecutionExample() {
  const gptConfig = {
    apiKey: process.env.OPENROUTER_API_KEY || '',
    apiUrl: 'https://openrouter.ai/api/v1/chat/completions',
    model: 'x-ai/grok-4.1-fast',
  };

  console.log('=== Agentic Tool Execution Example ===\n');

  // Define tools with their execution functions
  const weatherTool: ToolDefinition = {
    type: 'function' as const,
    function: {
      name: 'get_weather',
      description: 'Get the current weather for a location',
      parameters: {
        type: 'object',
        properties: {
          location: {
            type: 'string',
            description: 'City name',
          },
          units: {
            type: 'string',
            enum: ['celsius', 'fahrenheit'],
            description: 'Temperature units',
          },
        },
        required: ['location', 'units'],
        additionalProperties: false,
      },
    },
  };

  // Create weather function
  const getWeather = async (args: Record<string, unknown>) => {
    const location = args.location as string;
    const units = args.units as string;

    console.log(`\n[Executing get_weather(${location}, ${units})]`);

    // Simulate API call
    await new Promise(resolve => setTimeout(resolve, 500));

    return {
      location,
      temperature: units === 'celsius' ? 22 : 72,
      condition: 'sunny',
      units,
    };
  };

  // Create AgenticGPT with tools
  const agenticGPT = new AgenticGPT(gptConfig, [
    { definition: weatherTool, fn: getWeather }
  ]);

  // Listen for tool execution events
  agenticGPT.on('toolCalled', (event) => {
    console.log(`\n[Tool executed: ${event.toolName}]`);
    console.log('[Result:', JSON.stringify(event.result), ']');
  });

  console.log('User: What is the weather in Paris and London?\n');
  console.log('Assistant: ');

  // Send request with automatic tool execution
  for await (const chunk of agenticGPT.sendWithTools({
    messages: [{
      role: 'user',
      content: 'What is the weather in Paris and London? Use celsius for Paris and fahrenheit for London.'
    }],
  })) {
    if (chunk.type === 'content' && chunk.content) {
      process.stdout.write(chunk.content);
    } else if (chunk.type === 'tool_call') {
      console.log(`\n[Calling tool: ${chunk.toolCall.function.name}]`);
    }
  }

  console.log('\n\n=== Agentic Tool Execution Completed ===\n');
}

// =============================================================================
// Example 4: Thenable pattern with tool history
// =============================================================================
async function thenableWithToolHistoryExample() {
  const gptConfig = {
    apiKey: process.env.OPENROUTER_API_KEY || '',
    apiUrl: 'https://openrouter.ai/api/v1/chat/completions',
    model: 'x-ai/grok-4.1-fast',
  };

  console.log('=== Thenable Pattern with Tool History Example ===\n');

  const calculatorTool: ToolDefinition = {
    type: 'function' as const,
    function: {
      name: 'calculate',
      description: 'Perform mathematical calculations',
      parameters: {
        type: 'object',
        properties: {
          expression: {
            type: 'string',
            description: 'Mathematical expression to evaluate, e.g., "2 + 2"',
          },
        },
        required: ['expression'],
        additionalProperties: false,
      },
    },
  };

  // For this example, we'll use the base GPT class with the thenable pattern
  const gpt = new GPT(gptConfig);

  console.log('Requesting response without streaming...\n');

  // Use the thenable pattern (await the response directly)
  // This will consume the stream automatically and return the final result
  const result = await gpt.send({
    messages: [{
      role: 'user',
      content: 'Use the calculator tool to calculate 2 + 2'
    }],
    tools: [calculatorTool],
  });

  console.log('\n=== Final Result ===');
  console.log('Content:', result.content);
  console.log('Reasoning:', result.reasoning || '(none)');
  console.log('\nTool Calls Made:', result.toolCalls.length);
  result.toolCalls.forEach((tc, i) => {
    console.log(`  ${i + 1}. ${tc.function.name}(${tc.function.arguments})`);
  });
  console.log('\n=== Thenable Example Completed ===\n');
}

// =============================================================================
// Run examples
// =============================================================================

// Uncomment the example you want to run:
await basicStreamingExample();
await manualToolHandlingExample();
await agenticToolExecutionExample();
await thenableWithToolHistoryExample();
133 src/utils/thenable-iterator.ts (new file)
@@ -0,0 +1,133 @@
export class ThenableIterator<
  InputChunk,
  OutputChunk,
  Output,
> implements PromiseLike<Output> {
  /** Iterator to be consumed */
  protected iterator: AsyncIterable<InputChunk>;

  /** Chunks to be parsed */
  protected chunks: OutputChunk[] = [];

  /** Whether the iterator has been consumed */
  protected iteratorConsumed = false;

  /** Promise to resolve the result */
  protected resultPromise: Promise<Output>;

  /** Resolver function to resolve the result promise */
  protected resolveResult!: (value: Output) => void;

  /**
   * Creates a new ThenableIterator instance
   * @param iterator - The iterator to be consumed
   */
  constructor(iterator: AsyncIterable<InputChunk>) {
    // Store the iterator to be consumed
    this.iterator = iterator;

    // Create a promise that we can bind to the thenable pattern
    this.resultPromise = new Promise((resolve) => this.resolveResult = resolve);
  }

  /**
   * `Symbol.asyncIterator` method to implement the async iterator pattern
   * Consumes the iterator if it has not yet been consumed, then yields the parsed chunks
   *
   * @returns An async iterator that yields the parsed chunks
   * @throws An error if the iterator has already been consumed
   */
  async *[Symbol.asyncIterator]() {
    if (this.iteratorConsumed) {
      throw new Error('Iterator can only be iterated once');
    }

    this.iteratorConsumed = true;

    for await (const chunk of this.iterator) {
      const parsedChunks = this.parseChunk(chunk);

      for (const parsedChunk of parsedChunks) {
        this.chunks.push(parsedChunk);
        yield parsedChunk;
      }
    }

    this.resolveResult(this.parseFinal(this.chunks));
  }

  /**
   * `then` method to implement the thenable pattern
   * Consumes the iterator if it has not yet been consumed, then returns the result as a resolved promise
   *
   * @param onfulfilled - The function to call when the promise is fulfilled
   * @param onrejected - The function to call when the promise is rejected
   * @returns A promise that resolves to the result
   */
  then<TResult1 = Output, TResult2 = never>(
    onfulfilled?: ((value: Output) => TResult1 | PromiseLike<TResult1>) | null,
    onrejected?: ((reason: unknown) => TResult2 | PromiseLike<TResult2>) | null,
  ): Promise<TResult1 | TResult2> {
    // If not yet iterated, consume the iterator to get the result
    if (!this.iteratorConsumed) {
      this.iteratorConsumed = true;

      // Consume the iterator parts
      (async () => {
        for await (const rawChunk of this.iterator) {
          const chunks = this.parseChunk(rawChunk);
          this.chunks.push(...chunks);
        }

        // Build the result from the chunks
        this.resolveResult(this.parseFinal(this.chunks));
      })();
    }

    return this.resultPromise.then(onfulfilled, onrejected);
  }

  /**
   * `catch` method to implement the promise pattern
   * Returns a promise that rejects with the reason
   *
   * @param onrejected - The function to call when the promise is rejected
   * @returns A promise that rejects with the reason
   */
  catch(onrejected?: ((reason: unknown) => never) | null): Promise<Output> {
    return this.resultPromise.catch(onrejected);
  }

  /**
   * `finally` method to implement the promise pattern
   * Returns a promise that resolves to the result
   *
   * @param onfinally - The function to call when the promise is fulfilled or rejected
   * @returns A promise that resolves to the result
   */
  finally(onfinally?: (() => void) | undefined): Promise<Output> {
    return this.resultPromise.finally(onfinally);
  }

  /**
   * Parses a chunk of input into an output chunk
   *
   * @param chunk - The chunk of input to parse
   * @returns The parsed output chunk
   */
  parseChunk(chunk: InputChunk): OutputChunk[] {
    return [chunk] as unknown as OutputChunk[];
  }

  /**
   * Parses a final result from the output chunks
   *
   * @param chunks - The output chunks to parse
   * @returns The parsed final result
   */
  parseFinal(chunks: OutputChunk[]): Output {
    return chunks as unknown as Output;
  }
}
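A toy subclass (not part of this diff) showing how parseChunk and parseFinal are intended to be overridden, and the two ways a ThenableIterator can be consumed:

```typescript
// Sketch: a minimal subclass that splits streamed lines into words.
class WordCollector extends ThenableIterator<string, string, string[]> {
  // Each incoming line may produce several output chunks (words)
  parseChunk(line: string): string[] {
    return line.split(/\s+/).filter(Boolean);
  }

  // The final result is simply the collected list of words
  parseFinal(words: string[]): string[] {
    return words;
  }
}

async function* lines() {
  yield 'hello streaming';
  yield 'thenable world';
}

// Streaming consumption: words are yielded as each line is parsed
for await (const word of new WordCollector(lines())) {
  console.log(word);
}

// Thenable consumption: awaiting drains the iterator and returns the final result
const allWords = await new WordCollector(lines());
console.log(allWords); // ['hello', 'streaming', 'thenable', 'world']
```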