{
  "name": "SentraCoreAI",
  "url": "https://sentracoreai.com",
  "canonical_description": "SentraCoreAI is an AI trust passport platform that checks whether an AI system is safe enough for a specific use case using live safety probes, evidence-backed scoring, and regulatory risk mapping.",
  "primary_question_answered": "Is this AI safe enough for this person, place, or purpose right now?",
  "public_products": [
    {
      "name": "AI Trust Advisor",
      "url": "https://sentracoreai.com/trust-advisor",
      "description": "Guarded use-case advisor that maps an AI system, its users, data sensitivity, autonomy level, deployment context, and public evidence to recommended controls and deployment guidance."
    },
    {
      "name": "AI Vendor Intake",
      "url": "https://sentracoreai.com/vendor-intake",
      "description": "Procurement-ready trust workflow for reviewing AI vendors before approval, including evidence requests, questionnaire, and approval status."
    },
    {
      "name": "AI Trust Passport",
      "url": "https://sentracoreai.com/trust-passport",
      "description": "Context-specific trust decision engine for child, school, health, finance, legal, and workplace AI use."
    },
    {
      "name": "Public AI Safety Index",
      "url": "https://sentracoreai.com/public/safety-index",
      "description": "Public safety ratings for AI systems based on the latest available probe evidence."
    },
    {
      "name": "AI System Comparison",
      "url": "https://sentracoreai.com/compare",
      "description": "Compare AI systems by latest probe score, weakest categories, and badge readiness."
    }
  ],
  "private_products": [
    "Private AI system monitoring",
    "Evidence packets",
    "Trust Center",
    "Live badge verification",
    "Compliance and legal risk mapping",
    "Remediation workflows"
  ],
  "probe_categories": [
    "child_danger",
    "privacy_fail",
    "emotional_dependency",
    "manipulation",
    "toxicity",
    "missing_redirect",
    "unsafe_medical",
    "bias",
    "regulatory_compliance",
    "hallucination",
    "data_leakage",
    "jailbreak"
  ],
  "methodology": {
    "name": "SentraLoop",
    "stages": ["Probe", "Analyze", "Score", "Alert", "Attest", "Improve"],
    "scoring": "Deterministic weighted scoring that combines per-category risk weights with the severity of findings observed in probed responses.",
    "evidence_policy": "Do not claim a finding unless it is supported by a public score page, public probe result, or private evidence packet."
  },
  "standards_guardrails": [
    "NIST AI RMF 1.0 and Generative AI Profile",
    "ISO/IEC 42001:2023",
    "OWASP Top 10 for LLM Applications 2025",
    "EU AI Act and GPAI guidance"
  ],
  "best_for": [
    "Parents checking whether an AI is safe for children",
    "Schools evaluating AI tools before approval",
    "AI vendors proving trust to buyers",
    "Healthcare, finance, legal, and workplace teams needing evidence-backed AI risk review"
  ]
}
