Alibi模型解释器 alibi-explainer

Alibi模型解释器是一个专业的AI模型可解释性工具,提供反事实解释、锚点规则、信任分数等多种解释方法,帮助用户理解机器学习模型的决策过程。该技能支持积分梯度、SHAP、CEM等先进技术,适用于模型审计、合规验证和决策透明度提升。关键词:模型可解释性、AI解释器、反事实解释、锚点规则、信任分数、机器学习透明度、模型审计、XAI、Alibi、SHAP、CEM。

机器学习 0 次安装 0 次浏览 更新于 2026-02-23

名称: alibi-explainer 描述: 用于反事实解释、锚点规则和信任分数的Alibi可解释性技能。 允许工具:

  • 读取
  • 写入
  • Bash
  • Glob
  • Grep

alibi-explainer

概述

用于反事实解释、锚点规则、信任分数和高级模型解释技术的Alibi可解释性技能。

能力

  • 反事实实例生成
  • 锚点解释(基于规则)
  • 深度学习模型的积分梯度
  • 内核SHAP集成
  • 对比解释方法(CEM)
  • 预测置信度的信任分数
  • 相关正例和负例
  • 原型和批评选择

目标流程

  • 模型可解释性与解释性分析
  • 模型评估与验证框架

工具与库

  • Alibi
  • Alibi Detect
  • TensorFlow/PyTorch
  • scikit-learn

输入模式

{
  "type": "object",
  "required": ["modelPath", "explainerType", "instancePath"],
  "properties": {
    "modelPath": {
      "type": "string",
      "description": "训练模型的路径"
    },
    "explainerType": {
      "type": "string",
      "enum": ["counterfactual", "anchor", "integrated_gradients", "cem", "trust_score", "prototype"],
      "description": "要使用的Alibi解释器类型"
    },
    "instancePath": {
      "type": "string",
      "description": "要解释的实例路径"
    },
    "counterfactualConfig": {
      "type": "object",
      "properties": {
        "targetClass": { "type": "integer" },
        "maxIterations": { "type": "integer" },
        "lambda": { "type": "number" },
        "featureRange": { "type": "object" }
      }
    },
    "anchorConfig": {
      "type": "object",
      "properties": {
        "threshold": { "type": "number" },
        "coverageSamples": { "type": "integer" },
        "beamSize": { "type": "integer" }
      }
    },
    "cemConfig": {
      "type": "object",
      "properties": {
        "mode": { "type": "string", "enum": ["PP", "PN"] },
        "kappaMin": { "type": "number" },
        "kappaMax": { "type": "number" }
      }
    },
    "trainingDataPath": {
      "type": "string",
      "description": "训练数据路径(某些解释器需要)"
    }
  }
}

输出模式

{
  "type": "object",
  "required": ["status", "explanations"],
  "properties": {
    "status": {
      "type": "string",
      "enum": ["success", "error"]
    },
    "explanations": {
      "type": "array",
      "items": {
        "type": "object",
        "properties": {
          "instanceId": { "type": "string" },
          "originalPrediction": { "type": "string" },
          "explanation": { "type": "object" }
        }
      }
    },
    "counterfactuals": {
      "type": "array",
      "items": {
        "type": "object",
        "properties": {
          "instanceId": { "type": "string" },
          "counterfactual": { "type": "object" },
          "targetClass": { "type": "string" },
          "changedFeatures": { "type": "array" }
        }
      }
    },
    "anchors": {
      "type": "array",
      "items": {
        "type": "object",
        "properties": {
          "instanceId": { "type": "string" },
          "rules": { "type": "array", "items": { "type": "string" } },
          "precision": { "type": "number" },
          "coverage": { "type": "number" }
        }
      }
    },
    "trustScores": {
      "type": "array",
      "items": {
        "type": "object",
        "properties": {
          "instanceId": { "type": "string" },
          "score": { "type": "number" },
          "closestClass": { "type": "string" }
        }
      }
    }
  }
}

使用示例

{
  "kind": "skill",
  "title": "生成反事实解释",
  "skill": {
    "name": "alibi-explainer",
    "context": {
      "modelPath": "models/loan_classifier.pkl",
      "explainerType": "counterfactual",
      "instancePath": "data/rejected_applications.csv",
      "counterfactualConfig": {
        "targetClass": 1,
        "maxIterations": 1000,
        "lambda": 0.1
      },
      "trainingDataPath": "data/train.csv"
    }
  }
}