Documentation

Agent Code Examples

Production-ready agent code examples for Python, TypeScript, LangChain, task decomposition, error handling, parallel processing, and cost optimization patterns.

Production-ready integration examples for autonomous agents, AI coding assistants, and intelligent systems.

Python Examples

Basic Agent Integration

from openai import OpenAI
import os

class SkillBossAgent:
    """Thin wrapper around the SkillBoss OpenAI-compatible chat endpoint."""

    def __init__(self):
        # Credentials come from the environment; never hard-code API keys.
        self.client = OpenAI(
            base_url="https://api.skillboss.co/v1",
            api_key=os.getenv("SKILLBOSS_API_KEY")
        )

    def chat(self, prompt, model="claude-4-5-sonnet"):
        """Send a single-turn prompt and return the assistant's reply text."""
        completion = self.client.chat.completions.create(
            model=model,
            messages=[{"role": "user", "content": prompt}],
            max_tokens=2000
        )
        return completion.choices[0].message.content

# Usage: one-shot question through the default Claude model
bot = SkillBossAgent()
reply = bot.chat("Explain quantum computing")
print(reply)

Multi-Model Router

class SmartRouter:
    """Route tasks to optimal models based on complexity and cost."""

    # Task category -> model id; approximate price per 1M tokens in comments.
    MODELS = {
        "reasoning": "claude-4-5-sonnet",  # $3-15/1M
        "creative": "gpt-5",                        # $15/1M
        "fast": "gemini-2.5-flash",                # $0.10-0.40/1M
        "cheap": "deepseek/deepseek-v3"            # $0.27/1M
    }

    def __init__(self, api_key):
        self.client = OpenAI(
            base_url="https://api.skillboss.co/v1",
            api_key=api_key
        )

    def route(self, prompt, task_type="fast"):
        """Run *prompt* on the model mapped to *task_type*.

        Unknown task types fall back to the fast/cheap model. Returns a
        dict with the reply text, the model actually used, and the total
        token count reported by the API.
        """
        chosen = self.MODELS.get(task_type, "gemini-2.5-flash")
        reply = self.client.chat.completions.create(
            model=chosen,
            messages=[{"role": "user", "content": prompt}]
        )
        return {
            "result": reply.choices[0].message.content,
            "model_used": chosen,
            "tokens": reply.usage.total_tokens
        }

# Usage
router = SmartRouter(os.getenv("SKILLBOSS_API_KEY"))

# Cheap, fast lookup (Gemini Flash - $0.10/1M)
quick_answer = router.route("What's 2+2?", "fast")

# Complex reasoning (Claude - $3-15/1M)
deep_analysis = router.route("Analyze this business model...", "reasoning")

# Creative content (GPT-5 - $15/1M)
short_story = router.route("Write a sci-fi story", "creative")

Error Handling & Retry Logic

import time
from openai import OpenAI, OpenAIError

class RobustAgent:
    """Client wrapper that retries transient API failures and falls back to
    cheaper models when credits run out."""

    def __init__(self, api_key):
        self.client = OpenAI(
            base_url="https://api.skillboss.co/v1",
            api_key=api_key
        )

    def call_with_retry(self, model, messages, max_retries=3):
        """Call the chat-completions endpoint, retrying on 429/503.

        Retries use exponential backoff (1s, 2s, 4s, ...). Raises
        immediately on 402 (insufficient credits) and on any other
        non-retryable error; raises RuntimeError once retries are spent.
        """
        for attempt in range(max_retries):
            try:
                return self.client.chat.completions.create(
                    model=model,
                    messages=messages
                )

            except OpenAIError as e:
                # BUG FIX: not every OpenAIError subclass carries a
                # status_code (e.g. connection errors), so reading
                # e.status_code directly could raise AttributeError inside
                # the handler. Read it defensively instead.
                status = getattr(e, "status_code", None)

                if status == 429:  # Rate limit
                    wait = 2 ** attempt  # Exponential backoff
                    print(f"Rate limited. Waiting {wait}s...")
                    time.sleep(wait)

                elif status == 402:  # Insufficient credits
                    print("❌ Insufficient credits!")
                    raise

                elif status == 503:  # Service unavailable
                    print(f"Service unavailable. Retry {attempt + 1}/{max_retries}")
                    time.sleep(2 ** attempt)

                else:
                    # Unknown/non-retryable error (including errors with no
                    # HTTP status): propagate to the caller.
                    raise

        raise RuntimeError("Max retries exceeded")

    def smart_call(self, prompt):
        """Try expensive model, fallback to cheaper if needed (HTTP 402)."""
        models = [
            "claude-4-5-sonnet",
            "gpt-4o",
            "gemini-2.5-flash"
        ]

        for model in models:
            try:
                response = self.call_with_retry(
                    model=model,
                    messages=[{"role": "user", "content": prompt}]
                )
                return response.choices[0].message.content

            except OpenAIError as e:
                if getattr(e, "status_code", None) == 402:  # Try cheaper model
                    print(f"Insufficient credits for {model}, trying next...")
                    continue
                raise

        raise RuntimeError("All models failed")

JavaScript/TypeScript Examples

Node.js Integration

import OpenAI from 'openai';

class SkillBossAgent {
  private client: OpenAI;

  constructor(apiKey: string) {
    this.client = new OpenAI({
      baseURL: 'https://api.skillboss.co/v1',
      apiKey: apiKey
    });
  }

  /** Single-turn chat; resolves to the assistant's reply text. */
  async chat(prompt: string, model = 'claude-4-5-sonnet') {
    const completion = await this.client.chat.completions.create({
      model,
      messages: [{ role: 'user', content: prompt }],
      max_tokens: 2000
    });
    return completion.choices[0].message.content;
  }

  /** Stream a reply token-by-token straight to stdout. */
  async streamChat(prompt: string) {
    const stream = await this.client.chat.completions.create({
      model: 'claude-4-5-sonnet',
      messages: [{ role: 'user', content: prompt }],
      stream: true
    });

    for await (const part of stream) {
      // Delta may be absent on some chunks (e.g. the final one).
      process.stdout.write(part.choices[0]?.delta?.content || '');
    }
  }
}

// Usage (top-level await requires an ES module context)
const assistant = new SkillBossAgent(process.env.SKILLBOSS_API_KEY!);
const reply = await assistant.chat('Explain machine learning');
console.log(reply);

Parallel Processing

async function parallelProcessing(tasks: string[]) {
  const client = new OpenAI({
    baseURL: 'https://api.skillboss.co/v1',
    apiKey: process.env.SKILLBOSS_API_KEY
  });

  // Fan out all tasks concurrently; the fast model keeps per-request
  // latency and cost low when many requests run at once.
  const responses = await Promise.all(
    tasks.map(task =>
      client.chat.completions.create({
        model: 'gemini-2.5-flash',  // Fast model for parallel tasks
        messages: [{ role: 'user', content: task }]
      })
    )
  );

  return responses.map(r => r.choices[0].message.content);
}

// Usage: three independent prompts answered concurrently
const jobs = [
  'Summarize this article...',
  'Translate this text...',
  'Generate title ideas...'
];

const answers = await parallelProcessing(jobs);

LangChain Integration

Python LangChain

import os  # needed for os.getenv below; the example was not self-contained

from langchain_openai import ChatOpenAI
from langchain.prompts import ChatPromptTemplate
from langchain.chains import LLMChain

# Initialize with SkillBoss (OpenAI-compatible endpoint)
llm = ChatOpenAI(
    base_url="https://api.skillboss.co/v1",
    api_key=os.getenv("SKILLBOSS_API_KEY"),
    model="claude-4-5-sonnet"
)

# Create chain: the template is filled with topic/question at run time
prompt = ChatPromptTemplate.from_template(
    "You are an expert in {topic}. {question}"
)
chain = LLMChain(llm=llm, prompt=prompt)

# Run
result = chain.run(topic="AI", question="Explain neural networks")
print(result)

Multi-Model LangChain

from langchain.chat_models import ChatOpenAI
from langchain.schema import HumanMessage, SystemMessage

class MultiModelChain:
    """Hold one ChatOpenAI handle per model and expose task-oriented helpers."""

    def __init__(self, api_key):
        def _make(model):
            # All models share the same SkillBoss endpoint and key.
            return ChatOpenAI(
                base_url="https://api.skillboss.co/v1",
                api_key=api_key,
                model=model
            )

        self.models = {
            "claude": _make("claude-4-5-sonnet"),
            "gpt5": _make("gpt-5"),
            "gemini": _make("gemini-2.5-flash")
        }

    def _ask(self, key, prompt):
        # Shared invocation path for all three public helpers.
        return self.models[key].invoke([HumanMessage(content=prompt)])

    def think(self, prompt):
        """Use Claude for reasoning"""
        return self._ask("claude", prompt)

    def create(self, prompt):
        """Use GPT-5 for creativity"""
        return self._ask("gpt5", prompt)

    def quick(self, prompt):
        """Use Gemini for speed"""
        return self._ask("gemini", prompt)

Autonomous Agent Patterns

Task Decomposition Agent

class TaskAgent:
    """Autonomous agent: decompose a complex task, then execute each subtask."""

    def __init__(self, api_key):
        self.client = OpenAI(
            base_url="https://api.skillboss.co/v1",
            api_key=api_key
        )

    def decompose(self, complex_task):
        """Break complex task into subtasks; returns the raw numbered list."""
        response = self.client.chat.completions.create(
            model="claude-4-5-sonnet",
            messages=[{
                "role": "user",
                "content": f"""Break this task into 5-10 concrete subtasks:

Task: {complex_task}

Return as numbered list."""
            }]
        )
        return response.choices[0].message.content

    @staticmethod
    def _parse_subtasks(plan_text):
        """Split the model's plan into non-empty, stripped subtask lines."""
        return [line.strip() for line in plan_text.split('\n') if line.strip()]

    def execute_subtask(self, subtask):
        """Execute individual subtask"""
        response = self.client.chat.completions.create(
            model="gemini-2.5-flash",  # Fast for execution
            messages=[{"role": "user", "content": subtask}]
        )
        return response.choices[0].message.content

    def run(self, task):
        """Full autonomous execution"""
        print("🤔 Decomposing task...")
        plan = self.decompose(task)
        print(plan)

        results = []
        # BUG FIX: filter out blank lines BEFORE numbering — the original
        # enumerated raw lines (blanks included), so the printed subtask
        # numbers drifted from the subtasks actually executed.
        for i, subtask in enumerate(self._parse_subtasks(plan), start=1):
            print(f"\n⚡ Executing subtask {i}...")
            result = self.execute_subtask(subtask)
            results.append(result)
            print(f"✅ Done: {result[:100]}...")

        return results

# Usage: decompose and execute a campaign-building task end to end
task_agent = TaskAgent(os.getenv("SKILLBOSS_API_KEY"))
task_agent.run("Build a marketing campaign for new AI product")

Self-Improving Agent

class LearningAgent:
    """Agent that keeps an in-memory log of past tasks and reflections and
    feeds recent entries back into each new prompt."""

    def __init__(self, api_key):
        self.client = OpenAI(
            base_url="https://api.skillboss.co/v1",
            api_key=api_key
        )
        # Chronological log of "Task/Result" and "Reflection" entries.
        self.memory = []

    def act(self, task):
        """Execute task with memory context"""
        recent = self.memory[-5:]  # Last 5 interactions
        context = "\n".join(recent)

        prompt = f"""Previous learnings:
{context}

New task: {task}

Apply learnings and complete the task."""

        completion = self.client.chat.completions.create(
            model="claude-4-5-sonnet",
            messages=[{"role": "user", "content": prompt}]
        )

        answer = completion.choices[0].message.content
        self.memory.append(f"Task: {task}\nResult: {answer}")
        return answer

    def reflect(self):
        """Self-reflect on performance"""
        # chr(10) == "\n"; joined outside the f-string below.
        history = chr(10).join(self.memory)

        completion = self.client.chat.completions.create(
            model="claude-4-5-sonnet",
            messages=[{
                "role": "user",
                "content": f"""Analyze these past interactions and extract key learnings:

{history}

What patterns or improvements can be identified?"""
            }]
        )

        insight = completion.choices[0].message.content
        self.memory.append(f"Reflection: {insight}")
        return insight

Cost Optimization Examples

Budget-Aware Agent

class BudgetAgent:
    """Pick the most capable model whose estimated request cost fits a budget."""

    # Approximate price in USD per 1M tokens.
    COSTS = {
        "gpt-5": 15,
        "claude-4-5-sonnet": 9,
        "gpt-4o": 6.25,
        "gemini-2.5-flash": 0.25,
        "deepseek/deepseek-v3": 0.27
    }

    def __init__(self, api_key, budget_per_request=0.01):
        self.client = OpenAI(
            base_url="https://api.skillboss.co/v1",
            api_key=api_key
        )
        self.budget = budget_per_request  # max USD to spend per request

    def select_model(self, estimated_tokens=1000):
        """Return the highest-priced (most capable) affordable model.

        BUG FIX (docs): the original docstring claimed the *cheapest*
        model was selected, but the code picks the most expensive model
        whose estimated cost fits the budget — price is used as a proxy
        for capability. Falls back to gemini-2.5-flash when nothing fits.
        """
        # COSTS is per 1M tokens, so the estimated request cost is
        # price * tokens / 1_000_000. (Renamed from the misleading
        # 'cost_per_1k' — this is a total cost, not a per-1k rate.)
        estimated_cost = {
            model: price * estimated_tokens / 1_000_000
            for model, price in self.COSTS.items()
        }

        affordable = {
            model: cost
            for model, cost in estimated_cost.items()
            if cost <= self.budget
        }

        if not affordable:
            return "gemini-2.5-flash"  # Fallback to cheapest

        # Most expensive affordable model == best capability within budget.
        return max(affordable, key=lambda m: self.COSTS[m])

    def chat(self, prompt, max_tokens=1000):
        """Answer *prompt* with the best model affordable at *max_tokens*."""
        model = self.select_model(max_tokens)
        print(f"💰 Using {model} (within ${self.budget} budget)")

        response = self.client.chat.completions.create(
            model=model,
            messages=[{"role": "user", "content": prompt}],
            max_tokens=max_tokens
        )

        return response.choices[0].message.content

Next Steps

πŸ€–

For AI Agents

Complete autonomous agent integration guide

πŸ’»

Public Discovery

Browse all 100+ available services

πŸ“š

API Reference

Detailed API documentation

πŸ”‘

Get API Key

Tip: For AI agents, point your agent at skillboss.co/skill.md to auto-configure everything, or download your Skills Pack now.


More Examples: