from kernle.entity import Entity
from kernle.stack import SQLiteStack
from kernle.models.anthropic import AnthropicModel

# Create the core
entity = Entity()

# Bind a model
entity.set_model(AnthropicModel())

# Attach a memory stack
stack = SQLiteStack(db_path="~/.kernle/stacks/mystack/memory.db")
entity.attach_stack(stack, alias="mystack", set_active=True)

# Write memories (with full provenance)
# Note: In strict mode (default), derived_from is required for most memory types.
# Use strict=False for quick experimentation.
entity.episode(
    "Optimized the sync pipeline",
    "Reduced latency from 300ms to 100ms",
    lessons=["Batching network calls is worth the complexity"],
)
entity.belief(
    "Batch processing outperforms sequential I/O",
    confidence=0.85,
)

# Search
results = entity.search("performance optimization")
for r in results:
    print(f"{r.memory_type}: {r.content[:80]}")

# Load working memory
context = entity.load(token_budget=8000)

from kernle.entity import Entity
from kernle.stack import SQLiteStack

entity = Entity()
stack = SQLiteStack(db_path="memory.db")
entity.attach_stack(stack)

# Discover installed plugins
available = entity.discover_plugins()
for p in available:
    print(f"Found: {p.name} v{p.version}")

# Load a plugin (e.g., chainbased for commerce)
from chainbased import ChainbasedPlugin
entity.load_plugin(ChainbasedPlugin())

# The plugin writes memories through PluginContext
# with automatic source attribution

# Unload when done -- only memories remain
entity.unload_plugin("chainbased")
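
# The snippet above only loads and unloads an existing plugin. For intuition,
# here is a minimal sketch of what a plugin might look like. The base class,
# hook name, and PluginContext methods below are assumptions made for
# illustration, not the documented kernle plugin interface.
class WeatherPlugin:
    """Hypothetical plugin that records a daily forecast as an episode."""

    name = "weather"
    version = "0.1.0"

    def activate(self, ctx):
        # ctx stands in for the PluginContext handed to the plugin on load;
        # memories written through it carry this plugin as their source
        # automatically, so they persist after the plugin is unloaded.
        ctx.episode(
            "Fetched the daily forecast",
            "Sunny, high of 22C",
        )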