"""CLAN Visibility Guard — FastAPI service for visibility-level checks, classification, and redaction."""
import re
from typing import Any, Dict, List

from fastapi import FastAPI, HTTPException
from pydantic import BaseModel
# Visibility levels ordered from most open to most restricted.
LEVELS = ["public", "interclan", "incircle", "soulsafe", "sacred"]

# Map each level name to its position in LEVELS (0 = most open).
LEVEL_RANK = dict(zip(LEVELS, range(len(LEVELS))))

# Substrings (English + Russian/Ukrainian stems) that mark text as sensitive.
SENSITIVE_KEYWORDS = [
    "child", "children", "minor", "health", "trauma", "violence", "abuse",
    "ребен", "дитин", "здоров", "травм", "насил",
]
|
# Single FastAPI application instance; every route below attaches to it.
app = FastAPI(title="CLAN Visibility Guard", version="1.0.0")
class VisibilityCheck(BaseModel):
    """Request body for /visibility/check_downgrade.

    Both fields are expected to be one of LEVELS; they are normalized and
    validated by _norm, which rejects unknown values with HTTP 400.
    """

    current_level: str    # level the content currently has
    requested_level: str  # level the caller wants to move it to
|
class ClassifyRequest(BaseModel):
    """Request body for /visibility/classify."""

    text: str  # free text to scan for sensitive keywords
|
class RedactRequest(BaseModel):
    """Request body for /visibility/redact_for_level."""

    text: str          # text to redact
    target_level: str  # level the text is being prepared for (one of LEVELS)
|
def _norm(level: str) -> str:
    """Normalize a visibility level to lowercase; HTTP 400 if it is unknown.

    Accepts None / surrounding whitespace / mixed case; the returned value is
    always a key of LEVEL_RANK.
    """
    value = (level or "").strip().lower()
    if value in LEVEL_RANK:
        return value
    raise HTTPException(status_code=400, detail=f"invalid_visibility_level:{level}")
|
@app.get("/health")
def health() -> Dict[str, Any]:
    """Liveness probe: always reports the service as up."""
    return dict(status="ok", service="clan-visibility-guard")
|
@app.post("/visibility/check_downgrade")
def check_downgrade(body: VisibilityCheck) -> Dict[str, Any]:
    """Decide whether moving content to the requested level is permitted.

    Moving to the same or a more restricted level is always allowed; moving
    toward a more open level (a downgrade) requires consent and is refused.
    Unknown level names are rejected with HTTP 400 by _norm.
    """
    current = _norm(body.current_level)
    requested = _norm(body.requested_level)
    # A downgrade is any move to a strictly lower rank (more open audience).
    is_downgrade = LEVEL_RANK[requested] < LEVEL_RANK[current]
    return {
        "allowed": not is_downgrade,
        "reason": "downgrade_requires_consent" if is_downgrade else "ok",
        "current_level": current,
        "requested_level": requested,
    }
|
@app.post("/visibility/classify")
def classify(body: ClassifyRequest) -> Dict[str, Any]:
    """Recommend a visibility level for free text via keyword matching.

    Substring matching is done on the lowercased text. Sacred/spiritual
    markers win over everything else; otherwise any sensitive keyword hit
    pushes the recommendation to "soulsafe", and clean text gets "incircle".
    """
    lowered = (body.text or "").lower()

    hits: List[str] = []
    for keyword in SENSITIVE_KEYWORDS:
        if keyword in lowered:
            hits.append(keyword)

    sacred_markers = ("sacred", "сакрал", "духовн")
    if any(marker in lowered for marker in sacred_markers):
        level = "sacred"
    elif hits:
        level = "soulsafe"
    else:
        level = "incircle"

    return {
        "recommended_level": level,
        "sensitivity_flags": hits,
    }
|
# Token stems to mask when text is shown at an open level. Same stems the
# classifier treats as sensitive (minus the child/minor terms, as before).
_REDACT_TOKENS = [
    "ребен", "дитин", "здоров", "травм", "насил",
    "health", "trauma", "violence", "abuse",
]
# Precompiled once; IGNORECASE fixes the old bug where str.replace with
# lowercase tokens let "Health", "Trauma", "НАСИЛие" etc. leak through,
# even though classify() matches the same stems case-insensitively.
_REDACT_PATTERN = re.compile(
    "|".join(re.escape(token) for token in _REDACT_TOKENS),
    re.IGNORECASE,
)


@app.post("/visibility/redact_for_level")
def redact_for_level(body: RedactRequest) -> Dict[str, Any]:
    """Mask sensitive token stems when targeting an open visibility level.

    For "public", "interclan", and "incircle" targets, every occurrence of a
    sensitive stem (case-insensitive) is replaced with "[sensitive]"; for
    "soulsafe" and "sacred" the text passes through untouched. Unknown target
    levels are rejected with HTTP 400 by _norm.
    """
    target = _norm(body.target_level)
    txt = body.text or ""
    if target in {"public", "interclan", "incircle"}:
        redacted = _REDACT_PATTERN.sub("[sensitive]", txt)
    else:
        redacted = txt
    return {
        "target_level": target,
        "redacted_text": redacted,
        "changed": redacted != txt,
    }