Memories
Store, retrieve, update, and delete memories. Memories are the core building blocks of Hebbrix - they store knowledge that your AI can recall later.
Overview
A memory is a piece of information that you want your AI to remember. This could be:
- User preferences ("User prefers dark mode")
- Facts from conversations ("User's birthday is January 15th")
- Knowledge from documents ("The API rate limit is 100 requests per minute")
- Any structured or unstructured data you want to store
Endpoints
Code Examples
Create a Memory
import asyncio

from hebbrix import MemoryClient


async def main():
    async with MemoryClient(api_key="mem_sk_...") as client:
        collection = await client.collections.create(name="my-agent")

        # Simple memory (SDK calls the smart-extract POST /v1/memories endpoint)
        memory = await client.memories.create(
            collection_id=collection["id"],
            content="User's favorite color is blue",
        )

        # Memory with importance + metadata (tags live inside metadata)
        await client.memories.create(
            collection_id=collection["id"],
            content="User completed Python certification on 2024-01-15",
            importance=0.8,
            metadata={
                "tags": ["education", "python"],
                "certification_provider": "Coursera",
                "completion_date": "2024-01-15",
            },
        )
asyncio.run(main())
Process Conversations (fact extraction)
import os

import requests

BASE = "https://api.hebbrix.com/v1"
H = {"Authorization": f"Bearer {os.environ['HEBBRIX_API_KEY']}"}

# POST /v1/memories/process — hand it a chat transcript; the backend
# extracts atomic facts, detects conflicts, and emits ADD/UPDATE/NOOP
# events for each resulting memory.
r = requests.post(
    f"{BASE}/memories/process",
    headers=H,
    json={
        "messages": [
            {"role": "user", "content": "Hi! My name is Alex and I work at Acme Corp."},
            {"role": "assistant", "content": "Nice to meet you, Alex!"},
        ]
    },
)
result = r.json()
print(f"Facts extracted: {result['facts_extracted']}")
print(f"Created: {result['memories_created']}")
print(f"Updated: {result['memories_updated']}")

# Later, user corrects their name — the old memory is updated
r = requests.post(
    f"{BASE}/memories/process",
    headers=H,
    json={
        "messages": [
            {"role": "user", "content": "Actually, my name is Jordan, not Alex."},
            {"role": "assistant", "content": "No problem, Jordan!"},
        ]
    },
)
result = r.json()
# result["events"][0]["event"] == "UPDATE"
# result["events"][0]["previous_content"] == "Alex is the user's name"
Batch Create
import os

import requests

BASE = "https://api.hebbrix.com/v1"
H = {"Authorization": f"Bearer {os.environ['HEBBRIX_API_KEY']}"}

# POST /v1/memories/batch — create multiple memories in one request (max 100).
# Note: collection_id is top-level, NOT per-memory.
memories = [
    {"content": "User prefers email over phone calls"},
    {"content": "User works in software development"},
    {"content": "User is based in EST timezone"},
]
r = requests.post(
    f"{BASE}/memories/batch",
    headers=H,
    json={
        "memories": memories,
        "collection_id": "col_default",
    },
)
result = r.json()
print(f"Created {result['created']} memories")
List and Filter (SDK)
import asyncio
import os

from hebbrix import MemoryClient


async def main():
    async with MemoryClient(api_key=os.environ["HEBBRIX_API_KEY"]) as client:
        # --- Explicit page-at-a-time access ---
        page = await client.memories.list_page(limit=50)
        # page = {"items": [...], "next_cursor": str | None,
        #         "has_more": bool, "total_count": int}
        print(f"Showing {len(page['items'])} of {page['total_count']} memories")

        # Fetch the next page if there is one
        if page["has_more"]:
            next_page = await client.memories.list_page(
                cursor=page["next_cursor"], limit=50
            )
            print(f"Next page has {len(next_page['items'])} memories")

        # Filter by collection
        page = await client.memories.list_page(
            collection_id="col_customer_support", limit=20
        )

        # --- Simple "give me everything" iteration ---
        # iter_all follows next_cursor until has_more == False
        async for memory in client.memories.iter_all(collection_id="col_customer_support"):
            print(memory["id"], memory["content"][:80])
asyncio.run(main())
List and Filter (raw HTTP)
import os

import requests

BASE = "https://api.hebbrix.com/v1"
H = {"Authorization": f"Bearer {os.environ['HEBBRIX_API_KEY']}"}

# List all memories (cursor-paginated response)
r = requests.get(f"{BASE}/memories", headers=H, params={"limit": 50})
page = r.json()  # {"items": [...], "next_cursor": ..., "has_more": ..., "total_count": ...}

# Paginate through all
cursor = None
while True:
    params = {"limit": 100}
    if cursor:
        params["cursor"] = cursor
    page = requests.get(f"{BASE}/memories", headers=H, params=params).json()
    for memory in page["items"]:
        process(memory)  # `process` is your own handler, defined elsewhere
    if not page.get("has_more") or not page.get("next_cursor"):
        break
    cursor = page["next_cursor"]
cURL Examples
/v1/memories
curl -X POST "https://api.hebbrix.com/v1/memories" \
-H "Authorization: Bearer YOUR_API_KEY" \
-H "Content-Type: application/json" \
-d '{
"content": "User prefers dark mode",
"tags": [
"preferences"
]
}'
/v1/memories?limit=10
curl -X GET "https://api.hebbrix.com/v1/memories?limit=10" \
-H "Authorization: Bearer YOUR_API_KEY" \
-H "Content-Type: application/json"
/v1/memories/mem_abc123
curl -X DELETE "https://api.hebbrix.com/v1/memories/mem_abc123" \
-H "Authorization: Bearer YOUR_API_KEY" \
-H "Content-Type: application/json"
/v1/memories/process
curl -X POST "https://api.hebbrix.com/v1/memories/process" \
-H "Authorization: Bearer YOUR_API_KEY" \
-H "Content-Type: application/json" \
-d '{
"messages": [
{
"role": "user",
"content": "My name is Jordan and I work at Acme Corp."
},
{
"role": "assistant",
"content": "Nice to meet you, Jordan!"
}
]
}'
/v1/memories/bulk-delete
curl -X POST "https://api.hebbrix.com/v1/memories/bulk-delete" \
-H "Authorization: Bearer YOUR_API_KEY" \
-H "Content-Type: application/json" \
-d '{
"ids": [
"mem_abc123",
"mem_def456"
]
}'
/v1/memories/all
curl -X DELETE "https://api.hebbrix.com/v1/memories/all" \
-H "Authorization: Bearer YOUR_API_KEY" \
-H "Content-Type: application/json"
Memory Lifecycle
When you create a memory, Hebbrix automatically:
Embeds the Content
Converts text to vector embeddings for semantic search.
Calculates Importance
Analyzes content to determine relevance and priority.
Extracts Entities
Identifies people, places, concepts and adds to knowledge graph.
Indexes for Search
Adds to BM25 index for keyword matching alongside vectors.
Detects Conflicts
When using /process, checks for conflicting memories and automatically updates or removes outdated information.
Full pipeline propagation: When a memory is updated or deleted, all storage layers are re-indexed — embeddings, vectors, BM25, knowledge graph, propositions, and tiers are all kept in sync automatically.
