{
  "slug": "adversarial-robustness-toolbox-art-ibm",
  "name": "Adversarial Robustness Toolbox (ART)",
  "description": "Adversarial Robustness Toolbox (ART) is an open-source Python library for machine learning security. Originally developed by IBM Research and now part of the LF AI & Data Foundation, it provides tools for developers and researchers to evaluate, defend, and verify ML models against adversarial threats such as evasion, poisoning, extraction, and inference.",
  "url": "https://optimly.ai/brand/adversarial-robustness-toolbox-art-ibm",
  "logoUrl": "",
  "baiScore": 85,
  "archetype": "Challenger",
  "category": "Software",
  "categorySlug": null,
  "keyFacts": [],
  "aiReadiness": [],
  "competitors": [],
  "inboundCompetitors": [],
  "aiAlternatives": [],
  "parentBrand": null,
  "subBrands": [],
  "updatedAt": "2026-05-07T14:26:15.162676+00:00",
  "verifiedVitals": {
    "website": "https://github.com/Trusted-AI/adversarial-robustness-toolbox",
    "founded": "2018",
    "headquarters": "Armonk, NY (Origin) / San Francisco, CA (LF AI)",
    "pricing_model": "Free (Apache 2.0 License)",
    "core_products": "Python library for ML security evaluation and defense.",
    "key_differentiator": "The most comprehensive, framework-agnostic library for ML security, covering all four major threat types: evasion, poisoning, extraction, and inference.",
    "target_markets": "Data Scientists, AI Security Researchers, ML Engineers, Cybersecurity Professionals",
    "employee_count": "N/A (Open Source Project)",
    "funding_stage": "Open Source / Foundation Managed",
    "subcategory": "Cybersecurity / Machine Learning Infrastructure"
  },
  "intentTags": null,
  "timestamp": 1778204246808
}