Инициализировать базовый каркас MVP с чат-агентом на Agno и Ollama.

Добавить входную точку CLI, конфигурацию через .env и базовую документацию для быстрого локального запуска.
This commit is contained in:
Barabashka
2026-04-21 12:02:31 +03:00
commit 2aa0082743
7 changed files with 175 additions and 0 deletions
+6
View File
@@ -0,0 +1,6 @@
OLLAMA_MODEL_ID=gemma4:31b
OLLAMA_HOST=http://localhost:11435
OLLAMA_TEMPERATURE=0
AGENT_MARKDOWN=false
AGENT_DEBUG_MODE=true
AGENT_INSTRUCTIONS=You are a helpful assistant. Answer briefly and clearly.
+27
View File
@@ -0,0 +1,27 @@
# Python cache and bytecode
__pycache__/
*.py[cod]
*$py.class
# Virtual environments
.venv/
venv/
# Environment files
.env
# Build and packaging
build/
dist/
*.egg-info/
# Test and tool caches
.pytest_cache/
.mypy_cache/
.ruff_cache/
# IDE and OS files
.idea/
.vscode/
.DS_Store
.cursor
+40
View File
@@ -0,0 +1,40 @@
# Prisma Platform MVP
Minimal chat agent on Agno + Ollama.
## Current structure
```text
prisma_platform/
├── .env
├── .env.example
├── requirements.txt
└── src/
├── __init__.py
├── agent_runner.py
└── main.py
```
## Setup
```bash
python -m venv .venv
source .venv/bin/activate
pip install -r requirements.txt
cp .env.example .env
```
## Run
Interactive chat mode:
```bash
python -m src.main
```
Single message mode:
```bash
python -m src.main --message "Привет, что ты умеешь?"
```
+5
View File
@@ -0,0 +1,5 @@
agno
python-dotenv
ollama
socksio
openai
+1
View File
@@ -0,0 +1 @@
# Base package for agent entrypoint.
+53
View File
@@ -0,0 +1,53 @@
import os
from agno.agent import Agent
from agno.models.ollama import Ollama
def _env_bool(name: str, default: bool) -> bool:
value = os.getenv(name)
if value is None:
return default
return value.strip().lower() in {"1", "true", "yes", "on"}
def _env_float(name: str, default: float) -> float:
value = os.getenv(name)
if value is None:
return default
return float(value)
# Process-wide agent instance, built lazily on first use.
_agent: Agent | None = None


def get_agent() -> Agent:
    """Return the process-wide Agent, constructing it on first call.

    Configuration comes from environment variables (model id, host,
    temperature, markdown flag, debug flag, system instructions), with the
    same defaults as .env.example. Subsequent calls reuse the cached agent.
    """
    global _agent
    if _agent is None:
        model = Ollama(
            id=os.getenv("OLLAMA_MODEL_ID", "gemma4:31b"),
            host=os.getenv("OLLAMA_HOST", "http://localhost:11435"),
            options={"temperature": _env_float("OLLAMA_TEMPERATURE", 0.0)},
        )
        _agent = Agent(
            model=model,
            markdown=_env_bool("AGENT_MARKDOWN", False),
            instructions=os.getenv(
                "AGENT_INSTRUCTIONS",
                "You are a helpful assistant. Answer briefly and clearly.",
            ),
            debug_mode=_env_bool("AGENT_DEBUG_MODE", True),
        )
    return _agent
async def run_agent(message: str) -> str:
    """Send *message* to the shared agent and return its reply as text."""
    reply = await get_agent().arun(message)
    return str(reply.content)
+43
View File
@@ -0,0 +1,43 @@
import argparse
import asyncio
from dotenv import load_dotenv
from src.agent_runner import run_agent
def build_parser() -> argparse.ArgumentParser:
    """Create the command-line parser for the chat entrypoint."""
    parser = argparse.ArgumentParser(description="Run base chat agent.")
    parser.add_argument(
        "--message",
        help="Single message mode. If omitted, starts interactive chat.",
    )
    return parser
async def _main() -> None:
    """CLI entrypoint: one-shot mode with --message, otherwise a chat REPL."""
    load_dotenv()
    args = build_parser().parse_args()

    if args.message:
        # One-shot: answer the single message and exit.
        print(await run_agent(args.message))
        return

    print("Chat mode started. Type 'exit' or 'quit' to stop.")
    while True:
        user_message = input("you> ").strip()
        if not user_message:
            # Ignore blank input lines.
            continue
        if user_message.lower() in {"exit", "quit"}:
            print("Bye.")
            break
        print(f"agent> {await run_agent(user_message)}")


if __name__ == "__main__":
    asyncio.run(_main())