Getting Started

This guide walks you through installing @obayd/agentic and building a simple LLM agent with function calling.

1. Installation

Install the package:

npm install @obayd/agentic
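
Or, if you prefer yarn or pnpm, the equivalent commands are:

yarn add @obayd/agentic
pnpm add @obayd/agentic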

2. Basic Setup

Let's create a simple agent that can report the current weather using a custom tool.

// Import necessary components
import { Conversation, Tool, fetchResponseToStream } from '@obayd/agentic';

// --- Step 1: Define your LLM Interaction Callback ---
// This async generator function connects to your chosen LLM API.
// It receives the message history (plus an options object) and must yield string chunks of the LLM response.

async function* llmCallback(messages, options) {
    // Replace with your actual API endpoint, key, and model
    const YOUR_LLM_API_ENDPOINT = "YOUR_LLM_API_ENDPOINT";
    const YOUR_API_KEY = "YOUR_API_KEY";
    const YOUR_MODEL_NAME = "YOUR_MODEL_NAME";

    try {
        const response = await fetch(YOUR_LLM_API_ENDPOINT, {
            method: "POST",
            headers: {
                "Content-Type": "application/json",
                "Authorization": `Bearer ${YOUR_API_KEY}`,
                // Add any other necessary headers (e.g., Anthropic version header)
            },
            body: JSON.stringify({
                model: YOUR_MODEL_NAME,
                messages: messages, // Pass the formatted messages directly from the Conversation class
                stream: true,       // IMPORTANT: Ensure streaming is enabled
                // Add any model-specific parameters (temperature, max_tokens, etc.)
            }),
        });

        // Use the provided helper to process Server-Sent Events (SSE) streams
        // This handles common formats like OpenAI's streaming output
        yield* fetchResponseToStream(response);

    } catch (error) {
        console.error("LLM Callback Error:", error);
        // Yielding an error message helps surface issues
        yield `[Error connecting to LLM: ${error.message}]`;
    }
}
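
// --- (Optional) Mock Callback for Local Testing ---
// To test the streaming plumbing without an API key, you can pass a plain
// async generator instead. This mock is an illustrative sketch, not part of
// the library:
async function* mockLlmCallback(messages, options) {
    yield "Hello! This is a ";
    yield "mocked streaming response.";
}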


// --- Step 2: Define Your Tools ---
// Tools are functions the LLM can request to call.

const getCurrentWeather = Tool.make("get_current_weather")
    .description("Gets the current weather for a specified location.")
    // Define parameters the LLM needs to provide
    .param("location", "The city and state, e.g., San Francisco, CA", { required: true, type: "string" })
    .param("unit", "Temperature unit", { enum: ["celsius", "fahrenheit"], required: false, type: "string" }) // Optional param with allowed values
    // Define the action function that executes when the tool is called
    .action(async (params, instance, ...args) => {
        // `params` contains the arguments provided by the LLM (e.g., { location: 'Tokyo', unit: 'celsius' })
        // `instance` is the Conversation object itself (rarely needed here)
        // `...args` are any extra arguments passed via conversation.send(..., ...args)
        console.log(`[Tool Action] Executing get_current_weather with params:`, params);

        // --- Your actual tool logic goes here ---
        // Example: Simulate fetching weather data
        await new Promise(resolve => setTimeout(resolve, 50)); // Simulate network delay
        const location = params.location.toLowerCase();
        const unit = params.unit || "celsius";
        let temperature;

        if (location.includes("tokyo")) temperature = 15;
        else if (location.includes("san francisco")) temperature = 12;
        else temperature = 20; // Default

        if (unit === "fahrenheit") {
            temperature = (temperature * 9/5) + 32;
        }

        // Return the tool result as a JSON string
        // This result is sent back to the LLM so it can compose its reply
        return JSON.stringify({
            temperature: temperature,
            unit: unit,
            condition: "Mostly Sunny",
            location: params.location, // Echo back for clarity
        });
    });
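
// (Illustrative) Parameter-less tools follow the same builder pattern,
// assuming the .param() calls can simply be omitted. This get_current_time
// tool is a hypothetical example, not part of the library:
const getCurrentTime = Tool.make("get_current_time")
    .description("Gets the current time as an ISO 8601 string.")
    .action(async () => {
        return JSON.stringify({ time: new Date().toISOString() });
    });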


// --- Step 3: Initialize the Conversation ---
const conversation = new Conversation(llmCallback);

// --- Step 4: Define Conversation Content ---
// This sets up the initial system prompt and available tools.
conversation.content([
    // Add system prompt parts
    "You are a friendly weather assistant.",
    "Use the available tools to answer user questions about the weather.",
    // Add available tools
    getCurrentWeather,
]);
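
// To register more tools (such as the illustrative getCurrentTime above),
// add them to the same array alongside the system prompt strings.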


// --- Step 5: Run the Conversation ---
// Use an async function to handle the asynchronous stream.
async function run() {
    const userInput = "How's the weather in San Francisco today?";
    console.log(`\nšŸ‘¤ USER: ${userInput}`);
    console.log("\nšŸ¤– ASSISTANT:");

    try {
        // Call `send` to get the async generator for the response stream
        const stream = conversation.send(userInput);

        // Iterate through the events yielded by the stream
        for await (const event of stream) {
            switch (event.type) {
                case 'assistant':
                    // Assistant text chunk received
                    process.stdout.write(event.content);
                    break;
                case 'tool.generating':
                    // (Optional) LLM is generating raw input for the tool
                    // console.log(`\n[Tool Raw Chunk (${event.name})]: ${event.rawChunk}`);
                    break;
                case 'tool.calling':
                    // The Conversation decided to call a tool
                    process.stdout.write(`\n[āš™ļø Calling Tool: ${event.name}(${JSON.stringify(event.params)})]\n`);
                    break;
                case 'tool':
                    // The tool finished executing, result is available
                    console.log(`\n[āœ… Tool Result (${event.name})]: ${JSON.stringify(event.result)}`);
                    // The result is automatically sent back to the LLM in the next turn.
                    console.log("\nšŸ¤– ASSISTANT (processing tool result...):");
                    break;
                case 'error':
                    // An error occurred during conversation processing
                    console.error(`\n[āŒ Conversation Error]: ${event.content}`);
                    break;
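                default:
                    // Other event types may be emitted; they can be safely ignored here.
                    break;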
            }
        }
        console.log('\n\n--- Conversation Finished ---');

        // Optional: Inspect the final message history
        // console.log("\nFinal Message History:");
        // console.log(JSON.stringify(conversation.messages, null, 2));

    } catch (error) {
        // Catch critical errors during the send/processing loop
        console.error("\n[ šŸ’„ Critical Error Running Conversation ]:", error);
    }
}

// Start the conversation
run();
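
Because the Conversation instance keeps the message history across turns (see the conversation.messages inspection above), you can continue the dialogue with further send calls. A minimal follow-up sketch, printing only the assistant text for brevity (the tool events from the loop above apply here too):

async function followUp() {
    const stream = conversation.send("And what about Tokyo, in fahrenheit?");
    for await (const event of stream) {
        if (event.type === 'assistant') process.stdout.write(event.content);
    }
}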

Next Steps

You've built your first agent! Explore the Core Concepts section to learn more about:

  • Conversation management in detail.

  • Defining complex Tools.

  • Organizing tools with Toolpacks.

  • Advanced LLM Integration.

  • Understanding Streaming & Events.
