{ "cells": [ { "cell_type": "markdown", "metadata": {}, "source": [ "# 2. Create Your Own Agents\n", "\n", "This notebook expects that you're all setup (Have Claude desktop app running with an MCP Server setup in the previous notebook).\n", "\n", "Here we'll show you **how to pass your own Python functions to Claude** " ] }, { "cell_type": "code", "execution_count": 2, "metadata": {}, "outputs": [], "source": [ "import fused\n", "import json\n", "import os\n", "import time\n", "from pathlib import Path" ] }, { "cell_type": "code", "execution_count": 3, "metadata": {}, "outputs": [], "source": [ "# We still need your local paths\n", "PATH_TO_CLAUDE_CONFIG = (\n", " f\"{str(Path.home())}/Library/Application Support/Claude/claude_desktop_config.json\"\n", ")\n", "\n", "\n", "if not os.path.exists(PATH_TO_CLAUDE_CONFIG):\n", " # Creating the config file\n", " os.makedirs(os.path.dirname(PATH_TO_CLAUDE_CONFIG), exist_ok=True)\n", " with open(PATH_TO_CLAUDE_CONFIG, \"w\") as f:\n", " json.dump({}, f)\n", "\n", "assert os.path.exists(PATH_TO_CLAUDE_CONFIG), (\n", " \"Please update the PATH_TO_CLAUDE_CONFIG variable with the correct path to your Claude config file\"\n", ")" ] }, { "cell_type": "code", "execution_count": 4, "metadata": {}, "outputs": [], "source": [ "# Local path to the Claude app\n", "CLAUDE_APP_PATH = \"/Applications/Claude.app\"\n", "assert os.path.exists(CLAUDE_APP_PATH), (\n", " \"Please update the CLAUDE_APP_PATH variable with the correct path to your Claude app\"\n", ")" ] }, { "cell_type": "code", "execution_count": 5, "metadata": {}, "outputs": [], "source": [ "# Change this path if you're not running this from the repo root\n", "WORKING_DIR = os.getcwd()" ] }, { "cell_type": "code", "execution_count": 22, "metadata": {}, "outputs": [], "source": [ "# We'll load the commons folder once again to have our helper functions\n", "commit = \"28821ea\"\n", "common = fused.load(\n", " f\"https://github.com/fusedio/udfs/tree/{commit}/public/common\"\n", ").utils" ] }, { "cell_type": "code", "execution_count": 7, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "{'agents': [{'name': 'get_current_time', 'udfs': ['current_utc_time']},\n", " {'name': 'fused_docs', 'udfs': ['list_public_udfs', 'reading_fused_docs']}]}" ] }, "execution_count": 7, "metadata": {}, "output_type": "execute_result" } ], "source": [ "# And see which agents we have available\n", "json.load(open(os.path.join(WORKING_DIR, \"agents.json\")))" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "We'll make a simple UDF that gives us the top 5 stories from Hacker News as our basis" ] }, { "cell_type": "code", "execution_count": 9, "metadata": {}, "outputs": [], "source": [ "@fused.udf\n", "def udf(story_type: str = \"top\"):\n", " \"\"\"\n", " Fetches top posts from Hacker News as a dataframe.\n", "\n", " Parameters:\n", " story_type (str): Type of stories to fetch. Options are:\n", " - \"top\" for top stories\n", " - \"newest\" for latest stories\n", "\n", " Returns:\n", " pandas.DataFrame: DataFrame containing HN posts with columns:\n", " id, title, url, score, by (author), time, descendants (comments)\n", " \"\"\"\n", " import pandas as pd\n", " import requests\n", " import time\n", " from datetime import datetime\n", "\n", " # Validate input\n", " if story_type not in [\"top\", \"newest\"]:\n", " raise ValueError('Invalid story_type. Must be \"top\" or \"newest\"')\n", "\n", " # Map story_type to the appropriate HN API endpoint\n", " endpoint_map = {\"top\": \"topstories\", \"newest\": \"newstories\"}\n", "\n", " endpoint = endpoint_map[story_type]\n", "\n", " # Fetch the list of top or newest story IDs\n", " response = requests.get(f\"https://hacker-news.firebaseio.com/v0/{endpoint}.json\")\n", " story_ids = response.json()\n", "\n", " # Only doing 5 stories for now\n", " story_ids = story_ids[:5]\n", "\n", " # Fetch details for each story ID\n", " stories = []\n", " for story_id in story_ids:\n", " try:\n", " # Get the story details\n", " story_response = requests.get(\n", " f\"https://hacker-news.firebaseio.com/v0/item/{story_id}.json\"\n", " )\n", " story = story_response.json()\n", "\n", " # Skip if not a story or missing key fields\n", " if not story or story.get(\"type\") != \"story\" or \"title\" not in story:\n", " continue\n", "\n", " # Add to our list\n", " stories.append(\n", " {\n", " \"id\": story.get(\"id\"),\n", " \"title\": story.get(\"title\"),\n", " \"url\": story.get(\"url\", \"\"),\n", " \"score\": story.get(\"score\", 0),\n", " \"by\": story.get(\"by\", \"\"),\n", " \"time\": datetime.fromtimestamp(story.get(\"time\", 0)),\n", " \"descendants\": story.get(\"descendants\", 0),\n", " }\n", " )\n", "\n", " # Brief pause to avoid overloading the API\n", " time.sleep(0.1)\n", "\n", " except Exception as e:\n", " print(f\"Error fetching story {story_id}: {e}\")\n", "\n", " # Convert the list of stories to a DataFrame\n", " df = pd.DataFrame(stories)\n", "\n", " # Add a timestamp for when the data was fetched\n", " df[\"fetched_at\"] = datetime.now()\n", " print(f\"{df=}\")\n", "\n", " return df" ] }, { "cell_type": "code", "execution_count": 10, "metadata": {}, "outputs": [ { "data": { "text/html": [ "
| \n", " | id | \n", "title | \n", "url | \n", "score | \n", "by | \n", "time | \n", "descendants | \n", "fetched_at | \n", "
|---|---|---|---|---|---|---|---|---|
| 0 | \n", "43410061 | \n", "The Lost Art of Research as Leisure | \n", "https://kasurian.com/p/research-as-leisure | \n", "166 | \n", "altilunium | \n", "2025-03-19 10:09:15 | \n", "77 | \n", "2025-03-19 13:25:12.563609 | \n", "
| 1 | \n", "43400989 | \n", "Two new PebbleOS watches | \n", "https://ericmigi.com/blog/introducing-two-new-... | \n", "1441 | \n", "griffinli | \n", "2025-03-18 15:59:27 | \n", "428 | \n", "2025-03-19 13:25:12.563609 | \n", "
| 2 | \n", "43406710 | \n", "Make Ubuntu packages 90% faster by rebuilding ... | \n", "https://gist.github.com/jwbee/7e8b27e298de8bbb... | \n", "446 | \n", "jeffbee | \n", "2025-03-18 23:55:17 | \n", "258 | \n", "2025-03-19 13:25:12.563609 | \n", "
| 3 | \n", "43401245 | \n", "Apple restricts Pebble from being awesome with... | \n", "https://ericmigi.com/blog/apple-restricts-pebb... | \n", "1556 | \n", "griffinli | \n", "2025-03-18 16:23:21 | \n", "958 | \n", "2025-03-19 13:25:12.563609 | \n", "
| 4 | \n", "43410885 | \n", "The Origin of the Pork Taboo | \n", "https://archaeology.org/issues/march-april-202... | \n", "3 | \n", "diodorus | \n", "2025-03-19 12:16:06 | \n", "0 | \n", "2025-03-19 13:25:12.563609 | \n", "