Ezer Mishpati - AI legal decision drafting system with: MCP server (FastMCP) with document processing pipeline; web upload interface (FastAPI) for file upload and classification; pgvector-based semantic search; Hebrew legal document chunking and embedding.
119 lines · 4.1 KiB · Python
"""MCP tools for workflow status tracking."""
|
||
|
||
from __future__ import annotations
|
||
|
||
import json
|
||
from uuid import UUID
|
||
|
||
from legal_mcp.services import db
|
||
|
||
|
||
async def workflow_status(case_number: str) -> str:
    """סטטוס תהליך עבודה מלא לתיק - מסמכים, עיבוד, טיוטות.

    Args:
        case_number: מספר תיק הערר
    """
    # Resolve the case record; a missing case yields a user-facing Hebrew message.
    case = await db.get_case_by_number(case_number)
    if not case:
        return f"תיק {case_number} לא נמצא."

    case_id = UUID(case["id"])
    docs = await db.list_documents(case_id)

    # Count chunks per document in one grouped query instead of per-doc round trips.
    pool = await db.get_pool()
    async with pool.acquire() as conn:
        chunk_counts = await conn.fetch(
            "SELECT document_id, COUNT(*) as count FROM document_chunks WHERE case_id = $1 GROUP BY document_id",
            case_id,
        )
    chunk_map = {str(r["document_id"]): r["count"] for r in chunk_counts}

    # Per-document processing summary for the JSON payload.
    doc_status = [
        {
            "title": doc["title"],
            "type": doc["doc_type"],
            "extraction": doc["extraction_status"],
            # NOTE(review): chunk_map keys are str(document_id); assumes doc["id"]
            # is already a string (as case["id"] is above) — confirm against db layer.
            "chunks": chunk_map.get(doc["id"], 0),
            "pages": doc.get("page_count"),
        }
        for doc in docs
    ]

    # Check whether a decision draft exists on disk for this case.
    # (Removed an unused local `from pathlib import Path` — config.CASES_DIR
    # already provides a path object supporting the `/` operator.)
    from legal_mcp import config

    case_dir = config.CASES_DIR / case_number
    draft_path = case_dir / "drafts" / "decision.md"
    has_draft = draft_path.exists()
    draft_size = draft_path.stat().st_size if has_draft else 0

    status = {
        "case_number": case["case_number"],
        "title": case["title"],
        "status": case["status"],
        "documents": doc_status,
        "total_documents": len(docs),
        "total_chunks": sum(chunk_map.values()),
        "has_draft": has_draft,
        "draft_size_bytes": draft_size,
        "next_steps": _suggest_next_steps(case, docs, has_draft),
    }

    return json.dumps(status, ensure_ascii=False, indent=2)
|
||
|
||
|
||
def _suggest_next_steps(case: dict, docs: list, has_draft: bool) -> list[str]:
|
||
"""Suggest next steps based on case state."""
|
||
steps = []
|
||
doc_types = {d["doc_type"] for d in docs}
|
||
|
||
if not docs:
|
||
steps.append("העלה מסמכים לתיק (כתב ערר, תשובת ועדה)")
|
||
else:
|
||
if "appeal" not in doc_types:
|
||
steps.append("העלה כתב ערר")
|
||
if "response" not in doc_types:
|
||
steps.append("העלה תשובת ועדה/משיבים")
|
||
|
||
pending = [d for d in docs if d["extraction_status"] == "pending"]
|
||
if pending:
|
||
steps.append(f"עיבוד {len(pending)} מסמכים ממתינים")
|
||
|
||
if docs and not has_draft:
|
||
steps.append("התחל ניסוח טיוטת החלטה (/draft-decision)")
|
||
elif has_draft and case["status"] in ("new", "in_progress"):
|
||
steps.append("סקור ועדכן את הטיוטה")
|
||
steps.append("עדכן סטטוס ל-drafted")
|
||
|
||
if case["status"] == "drafted":
|
||
steps.append("סקירה סופית ועדכון סטטוס ל-reviewed")
|
||
elif case["status"] == "reviewed":
|
||
steps.append("אישור סופי ועדכון סטטוס ל-final")
|
||
|
||
return steps
|
||
|
||
|
||
async def processing_status() -> str:
    """סטטוס כללי - מספר תיקים, מסמכים ממתינים לעיבוד."""
    # Output key -> COUNT query. Dict insertion order fixes the key order
    # of the resulting JSON payload.
    count_queries = {
        "cases": "SELECT COUNT(*) FROM cases",
        "documents": "SELECT COUNT(*) FROM documents",
        "pending_processing": "SELECT COUNT(*) FROM documents WHERE extraction_status = 'pending'",
        "chunks": "SELECT COUNT(*) FROM document_chunks",
        "style_corpus_entries": "SELECT COUNT(*) FROM style_corpus",
        "style_patterns": "SELECT COUNT(*) FROM style_patterns",
    }

    pool = await db.get_pool()
    summary: dict[str, int] = {}
    async with pool.acquire() as conn:
        for key, sql in count_queries.items():
            summary[key] = await conn.fetchval(sql)

    return json.dumps(summary, ensure_ascii=False, indent=2)
|