init
This commit is contained in:
2
app/services/__init__.py
Normal file
2
app/services/__init__.py
Normal file
@@ -0,0 +1,2 @@
|
||||
# services package
|
||||
|
||||
52
app/services/config_service.py
Normal file
52
app/services/config_service.py
Normal file
@@ -0,0 +1,52 @@
|
||||
"""
|
||||
app/services/config_service.py
|
||||
Read and write runtime settings stored in the SQLite settings table.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import logging
|
||||
|
||||
from app.database import get_conn, get_write_conn
|
||||
from app.models import ConfigResponse
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# The settings-table keys this service manages.
# NOTE(review): not referenced within this module — presumably imported by the
# API layer for request validation; confirm before removing.
_SETTING_KEYS = {"default_wait_seconds", "default_empty_response", "agent_stale_after_seconds"}
|
||||
|
||||
|
||||
def get_config() -> ConfigResponse:
    """Load the current runtime settings from the settings table.

    Keys absent from the table fall back to their defaults:
    wait=10 seconds, empty response="", stale threshold=30 seconds.
    """
    with get_conn() as conn:
        stored = dict(conn.execute("SELECT key, value FROM settings").fetchall())
    wait = stored.get("default_wait_seconds", 10)
    empty = stored.get("default_empty_response", "")
    stale = stored.get("agent_stale_after_seconds", 30)
    return ConfigResponse(
        default_wait_seconds=int(wait),
        default_empty_response=empty,
        agent_stale_after_seconds=int(stale),
    )
|
||||
|
||||
|
||||
def update_config(
    default_wait_seconds: int | None = None,
    default_empty_response: str | None = None,
    agent_stale_after_seconds: int | None = None,
) -> ConfigResponse:
    """Persist every non-None setting and return the resulting configuration.

    Values are stored as strings in the settings table; parameters left at
    None are not touched. Always returns the freshly re-read config.
    """
    candidates = {
        "default_wait_seconds": default_wait_seconds,
        "default_empty_response": default_empty_response,
        "agent_stale_after_seconds": agent_stale_after_seconds,
    }
    updates = {key: str(val) for key, val in candidates.items() if val is not None}

    if updates:
        with get_write_conn() as conn:
            conn.executemany(
                "INSERT OR REPLACE INTO settings (key, value) VALUES (?, ?)",
                list(updates.items()),
            )
        logger.info("Config updated: %s", list(updates.keys()))

    return get_config()
|
||||
|
||||
75
app/services/event_service.py
Normal file
75
app/services/event_service.py
Normal file
@@ -0,0 +1,75 @@
|
||||
"""
|
||||
app/services/event_service.py
|
||||
Server-Sent Events (SSE) broadcaster.
|
||||
Maintains a set of subscriber asyncio queues and fans out events to all of them.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import asyncio
|
||||
import json
|
||||
import logging
|
||||
from datetime import datetime, timezone
|
||||
from typing import AsyncGenerator
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# All active SSE subscriber queues; each queue belongs to one connected
# client and is fanned out to by broadcast().
_subscribers: set[asyncio.Queue] = set()
|
||||
|
||||
|
||||
def subscribe() -> asyncio.Queue:
    """Create a bounded queue, register it as an SSE subscriber, and return it.

    The maxsize of 64 bounds memory per slow client; broadcast() drops
    subscribers whose queue fills up.
    """
    queue: asyncio.Queue = asyncio.Queue(maxsize=64)
    _subscribers.add(queue)
    logger.debug("SSE subscriber added, total=%d", len(_subscribers))
    return queue
|
||||
|
||||
|
||||
def unsubscribe(q: asyncio.Queue) -> None:
    """Drop *q* from the subscriber set; a no-op if it was never registered."""
    # discard (not remove) so a double-unsubscribe is harmless.
    _subscribers.discard(q)
    logger.debug("SSE subscriber removed, total=%d", len(_subscribers))
|
||||
|
||||
|
||||
def broadcast(event_type: str, data: dict) -> None:
    """Serialise one event and push it to every subscriber queue.

    Safe from synchronous code: delivery uses put_nowait, and any subscriber
    whose queue is full is logged and removed from the set.
    """
    message = {
        "type": event_type,
        "timestamp": datetime.now(timezone.utc).isoformat(),
        "data": data,
    }
    payload = json.dumps(message)

    stale: list[asyncio.Queue] = []
    # Iterate over a snapshot so removal during iteration stays safe.
    for queue in list(_subscribers):
        try:
            queue.put_nowait(payload)
        except asyncio.QueueFull:
            logger.warning("SSE subscriber queue full, dropping event type=%s", event_type)
            stale.append(queue)
    _subscribers.difference_update(stale)
|
||||
|
||||
|
||||
async def event_generator(q: asyncio.Queue) -> AsyncGenerator[str, None]:
    """Yield SSE-formatted frames from subscriber queue *q*.

    Emits a comment ping whenever no event arrives for 15 seconds, and
    unsubscribes the queue once the consumer disconnects or is cancelled.
    """
    try:
        while True:
            try:
                payload = await asyncio.wait_for(q.get(), timeout=15.0)
            except asyncio.TimeoutError:
                # Idle: send a keep-alive comment so browsers/proxies do not
                # tear down the connection.
                yield ": ping\n\n"
            else:
                yield f"data: {payload}\n\n"
    except asyncio.CancelledError:
        # Client went away; fall through to cleanup without propagating.
        pass
    finally:
        unsubscribe(q)
|
||||
|
||||
203
app/services/instruction_service.py
Normal file
203
app/services/instruction_service.py
Normal file
@@ -0,0 +1,203 @@
|
||||
"""
|
||||
app/services/instruction_service.py
|
||||
Business logic for managing the instruction queue.
|
||||
All write operations that affect queue integrity use the write lock via get_write_conn().
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import asyncio
|
||||
import logging
|
||||
import sqlite3
|
||||
import uuid
|
||||
from datetime import datetime, timezone
|
||||
from typing import Optional
|
||||
|
||||
from app.database import get_conn, get_write_conn
|
||||
from app.models import InstructionItem, InstructionStatus
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Wakeup event – lets get_user_request react instantly when a new instruction
|
||||
# is enqueued instead of sleeping for a full second.
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
# Set by init_wakeup(); stays None until the app's event loop has started.
_wakeup_event: asyncio.Event | None = None
# Loop captured at init time so _trigger_wakeup can use call_soon_threadsafe.
_event_loop: asyncio.AbstractEventLoop | None = None
|
||||
|
||||
|
||||
async def init_wakeup() -> None:
    """Initialise the wakeup event and remember the running loop.

    Must run inside the application's event loop (e.g. during lifespan
    startup) so _trigger_wakeup can later schedule set() thread-safely.
    """
    global _wakeup_event, _event_loop
    _event_loop = asyncio.get_running_loop()
    _wakeup_event = asyncio.Event()
    logger.debug("Instruction wakeup event initialised")
|
||||
|
||||
|
||||
def get_wakeup_event() -> asyncio.Event | None:
    """Return the wakeup Event created by init_wakeup(), or None before init."""
    return _wakeup_event
|
||||
|
||||
|
||||
def _trigger_wakeup() -> None:
    """Wake any waiter by scheduling Event.set() on the captured loop.

    Safe to call from worker threads; silently a no-op until init_wakeup()
    has run.
    """
    # Snapshot the globals so the check and the use see the same objects.
    event, loop = _wakeup_event, _event_loop
    if event is not None and loop is not None:
        loop.call_soon_threadsafe(event.set)
|
||||
|
||||
|
||||
def _now_iso() -> str:
    """Return the current UTC time as an ISO-8601 string (tz-aware, +00:00)."""
    return datetime.now(timezone.utc).isoformat()
|
||||
|
||||
|
||||
def _row_to_item(row: sqlite3.Row) -> InstructionItem:
    """Hydrate an InstructionItem from a row of the instructions table."""
    # Columns copied through verbatim; status is wrapped in its enum.
    passthrough = (
        "id",
        "content",
        "created_at",
        "updated_at",
        "consumed_at",
        "consumed_by_agent_id",
        "position",
    )
    kwargs = {name: row[name] for name in passthrough}
    kwargs["status"] = InstructionStatus(row["status"])
    return InstructionItem(**kwargs)
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Reads
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
def list_instructions(status_filter: Optional[str] = None) -> list[InstructionItem]:
    """Return instructions ordered by queue position.

    A status_filter of None, "" or "all" returns everything; any other value
    is matched against the status column.
    """
    if status_filter and status_filter != "all":
        query = "SELECT * FROM instructions WHERE status = ? ORDER BY position ASC"
        params: tuple = (status_filter,)
    else:
        query = "SELECT * FROM instructions ORDER BY position ASC"
        params = ()

    with get_conn() as conn:
        return [_row_to_item(row) for row in conn.execute(query, params).fetchall()]
|
||||
|
||||
|
||||
def get_instruction(instruction_id: str) -> Optional[InstructionItem]:
    """Look up a single instruction by primary key; None when absent."""
    with get_conn() as conn:
        cursor = conn.execute(
            "SELECT * FROM instructions WHERE id = ?", (instruction_id,)
        )
        row = cursor.fetchone()
    if row is None:
        return None
    return _row_to_item(row)
|
||||
|
||||
|
||||
def get_queue_counts() -> dict[str, int]:
    """Count pending and consumed instructions under one read connection."""
    with get_conn() as conn:
        (pending,) = conn.execute(
            "SELECT COUNT(*) FROM instructions WHERE status = 'pending'"
        ).fetchone()
        (consumed,) = conn.execute(
            "SELECT COUNT(*) FROM instructions WHERE status = 'consumed'"
        ).fetchone()
    return {"pending_count": pending, "consumed_count": consumed}
|
||||
|
||||
|
||||
def _next_position(conn: sqlite3.Connection) -> int:
    """Next free queue position: 1 + current MAX(position), or 1 when empty."""
    (current_max,) = conn.execute("SELECT MAX(position) FROM instructions").fetchone()
    if not current_max:
        # MAX() is NULL on an empty table.
        current_max = 0
    return current_max + 1
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Writes
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
def create_instruction(content: str) -> InstructionItem:
    """Append a new pending instruction to the end of the queue.

    Args:
        content: Instruction text to enqueue.

    Returns:
        The newly created InstructionItem (status 'pending').

    Raises:
        RuntimeError: if the row cannot be read back after insertion.
    """
    instruction_id = str(uuid.uuid4())
    now = _now_iso()

    with get_write_conn() as conn:
        # Position is assigned under the write lock so concurrent creates
        # cannot collide.
        pos = _next_position(conn)
        conn.execute(
            """
            INSERT INTO instructions (id, content, status, created_at, updated_at, position)
            VALUES (?, ?, 'pending', ?, ?, ?)
            """,
            (instruction_id, content, now, now, pos),
        )

    logger.info("Instruction created id=%s pos=%d", instruction_id, pos)
    item = get_instruction(instruction_id)
    if item is None:
        # Should be impossible right after a committed insert. Explicit raise
        # instead of `assert` so the check survives `python -O`.
        raise RuntimeError(f"Instruction {instruction_id} vanished after insert")
    _trigger_wakeup()  # wake up any waiting get_user_request calls immediately
    return item
|
||||
|
||||
|
||||
def update_instruction(instruction_id: str, content: str) -> InstructionItem:
    """Replace the content of a pending instruction.

    Args:
        instruction_id: id of the instruction to edit.
        content: new instruction text.

    Returns:
        The updated InstructionItem.

    Raises:
        KeyError: if no instruction with this id exists.
        PermissionError: if the instruction has already been consumed.
        RuntimeError: if the row cannot be read back after the update.
    """
    with get_write_conn() as conn:
        row = conn.execute(
            "SELECT status FROM instructions WHERE id = ?", (instruction_id,)
        ).fetchone()
        if row is None:
            raise KeyError(instruction_id)
        if row["status"] != "pending":
            raise PermissionError(f"Instruction {instruction_id} is already consumed")

        now = _now_iso()
        conn.execute(
            "UPDATE instructions SET content = ?, updated_at = ? WHERE id = ?",
            (content, now, instruction_id),
        )

    logger.info("Instruction updated id=%s", instruction_id)
    item = get_instruction(instruction_id)
    if item is None:
        # Explicit raise instead of `assert` so the check survives `python -O`.
        raise RuntimeError(f"Instruction {instruction_id} vanished after update")
    return item
|
||||
|
||||
|
||||
def delete_instruction(instruction_id: str) -> None:
    """Remove a pending instruction from the queue.

    Args:
        instruction_id: id of the instruction to delete.

    Raises:
        KeyError: if no instruction with this id exists.
        PermissionError: if the instruction has already been consumed.
    """
    with get_write_conn() as conn:
        row = conn.execute(
            "SELECT status FROM instructions WHERE id = ?", (instruction_id,)
        ).fetchone()
        if row is None:
            raise KeyError(instruction_id)
        if row["status"] != "pending":
            raise PermissionError(f"Instruction {instruction_id} is already consumed")

        conn.execute("DELETE FROM instructions WHERE id = ?", (instruction_id,))

    logger.info("Instruction deleted id=%s", instruction_id)
|
||||
|
||||
|
||||
def consume_next(agent_id: str = "unknown") -> Optional[InstructionItem]:
    """Claim the oldest pending instruction on behalf of *agent_id*.

    The select-and-update pair runs under the write lock, so two concurrent
    callers can never consume the same item. Returns None when no pending
    instruction exists.
    """
    with get_write_conn() as conn:
        candidate = conn.execute(
            """
            SELECT id FROM instructions
            WHERE status = 'pending'
            ORDER BY position ASC
            LIMIT 1
            """
        ).fetchone()
        if candidate is None:
            # Queue is empty.
            return None

        instruction_id = candidate["id"]
        now = _now_iso()
        conn.execute(
            """
            UPDATE instructions
            SET status = 'consumed', consumed_at = ?, consumed_by_agent_id = ?, updated_at = ?
            WHERE id = ?
            """,
            (now, agent_id, now, instruction_id),
        )

    logger.info("Instruction consumed id=%s agent=%s", instruction_id, agent_id)
    return get_instruction(instruction_id)
|
||||
|
||||
79
app/services/status_service.py
Normal file
79
app/services/status_service.py
Normal file
@@ -0,0 +1,79 @@
|
||||
"""
|
||||
app/services/status_service.py
|
||||
Tracks server startup time and agent activity.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import logging
|
||||
import sqlite3
|
||||
from datetime import datetime, timezone
|
||||
from typing import Optional
|
||||
|
||||
from app.database import get_conn, get_write_conn
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# Captured once at import time; treated as the server's start moment.
_server_started_at: datetime = datetime.now(timezone.utc)
|
||||
|
||||
|
||||
def server_started_at() -> datetime:
    """Return the UTC datetime captured when this module was imported."""
    return _server_started_at
|
||||
|
||||
|
||||
def _now_iso() -> str:
    """Return the current UTC time as an ISO-8601 string (tz-aware, +00:00)."""
    return datetime.now(timezone.utc).isoformat()
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Agent activity
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
def record_agent_activity(agent_id: str, result_type: str) -> None:
    """Upsert this agent's activity row; intended to run on every tool call.

    Both last_seen_at and last_fetch_at are stamped with the same moment.
    """
    timestamp = _now_iso()
    params = (agent_id, timestamp, timestamp, result_type)
    with get_write_conn() as conn:
        conn.execute(
            """
            INSERT INTO agent_activity (agent_id, last_seen_at, last_fetch_at, last_result_type)
            VALUES (?, ?, ?, ?)
            ON CONFLICT(agent_id) DO UPDATE SET
                last_seen_at = excluded.last_seen_at,
                last_fetch_at = excluded.last_fetch_at,
                last_result_type = excluded.last_result_type
            """,
            params,
        )
    logger.debug("Agent activity recorded agent=%s result=%s", agent_id, result_type)
|
||||
|
||||
|
||||
def get_latest_agent_activity() -> Optional[sqlite3.Row]:
    """Fetch the agent_activity row with the newest last_seen_at, if any."""
    query = "SELECT * FROM agent_activity ORDER BY last_seen_at DESC LIMIT 1"
    with get_conn() as conn:
        row = conn.execute(query).fetchone()
    return row
|
||||
|
||||
|
||||
def get_agent_stale_seconds() -> int:
    """Read agent_stale_after_seconds from the settings table.

    Returns:
        The configured threshold, or 30 (the application default, matching
        config_service) when the key is missing or its stored value is not
        a valid integer. Previously a malformed value raised an uncaught
        ValueError to the caller.
    """
    with get_conn() as conn:
        row = conn.execute(
            "SELECT value FROM settings WHERE key = 'agent_stale_after_seconds'"
        ).fetchone()
    if row is None:
        return 30
    try:
        return int(row["value"])
    except (TypeError, ValueError):
        logger.warning(
            "Invalid agent_stale_after_seconds value %r, falling back to 30",
            row["value"],
        )
        return 30
|
||||
|
||||
|
||||
def is_agent_connected() -> bool:
    """True when the newest agent heartbeat lies within the stale window."""
    latest = get_latest_agent_activity()
    if latest is None:
        # No agent has ever checked in.
        return False

    last_seen = datetime.fromisoformat(latest["last_seen_at"])
    if last_seen.tzinfo is None:
        # Stored timestamps are expected to be UTC; normalise naive values
        # defensively so the subtraction below does not raise.
        last_seen = last_seen.replace(tzinfo=timezone.utc)

    age_seconds = (datetime.now(timezone.utc) - last_seen).total_seconds()
    return age_seconds <= get_agent_stale_seconds()
|
||||
|
||||
Reference in New Issue
Block a user