工作抽样分析器Skill work-sampling-analyzer

工作抽样分析器是一款专门用于工业工程和流程优化的AI工具,通过随机抽样方法分析工人活动分布、设备利用率和工作效率。该技能提供样本量计算、随机观察调度、统计分析、控制图监控等功能,帮助企业确定标准工时、优化资源配置、提高生产效率。关键词:工作抽样、工时研究、效率分析、利用率统计、工业工程、流程优化、标准时间、质量控制、生产管理、精益制造。

精益生产 0 次安装 9 次浏览 更新于 2/25/2026

name: work-sampling-analyzer description: 用于活动分布和利用率研究的工作抽样分析技能。 allowed-tools: Bash(*) Read Write Edit Glob Grep WebFetch metadata: author: babysitter-sdk version: "1.0.0" category: work-measurement backlog-id: SK-IE-035

工作抽样分析器

您是工作抽样分析器 - 专门用于工作抽样研究以分析活动分布和设备/工人利用率的技能。

概述

此技能支持AI驱动的工作抽样,包括:

  • 随机观察调度
  • 样本量确定
  • 活动分类
  • 统计置信区间
  • 控制图监控
  • 多活动研究
  • 从抽样开发标准时间
  • 利用率分析

功能

1. 样本量确定

import random
from collections import Counter
from datetime import datetime, timedelta

import numpy as np
from scipy import stats

def determine_sample_size_binomial(estimated_proportion: float,
                                  desired_accuracy: float,
                                  confidence_level: float = 0.95):
    """Compute the number of observations needed for a work-sampling study.

    Args:
        estimated_proportion: Estimated fraction of time spent on the
            activity, as a decimal in [0, 1].
        desired_accuracy: Desired absolute accuracy (e.g. 0.05 for +/-5%).
        confidence_level: Statistical confidence level (typically 0.95).

    Returns:
        dict with the required observation count and the study parameters.
    """
    z_score = stats.norm.ppf(1 - (1 - confidence_level) / 2)

    # Binomial sample-size formula: n = z^2 * p * (1 - p) / e^2
    variance_term = estimated_proportion * (1 - estimated_proportion)
    n_required = z_score ** 2 * variance_term / desired_accuracy ** 2

    return {
        "required_observations": int(np.ceil(n_required)),
        "estimated_proportion": estimated_proportion,
        "desired_accuracy": f"±{desired_accuracy * 100:.1f}%",
        "confidence_level": f"{confidence_level * 100:.0f}%",
        "z_score": round(z_score, 2),
    }

def update_sample_size(observations: int, observed_proportion: float,
                      desired_accuracy: float, confidence_level: float = 0.95):
    """Re-evaluate the required sample size from the proportion observed so far.

    Args:
        observations: Number of observations collected to date.
        observed_proportion: Proportion of the activity seen in those
            observations.
        desired_accuracy: Desired absolute accuracy (e.g. 0.05 for +/-5%).
        confidence_level: Statistical confidence level (default 0.95).

    Returns:
        dict describing progress toward the required sample size.
    """
    z = stats.norm.ppf(1 - (1 - confidence_level) / 2)

    # Re-run the binomial formula using the empirically observed proportion.
    needed = int(np.ceil(
        z ** 2 * observed_proportion * (1 - observed_proportion)
        / desired_accuracy ** 2
    ))
    remaining = needed - observations

    return {
        "current_observations": observations,
        "observed_proportion": round(observed_proportion, 3),
        "required_observations": needed,
        "additional_needed": remaining if remaining > 0 else 0,
        "study_complete": observations >= needed,
    }

2. 随机观察调度

def generate_random_schedule(study_duration_days: int,
                            observations_per_day: int,
                            work_start: str = "08:00",
                            work_end: str = "17:00",
                            exclude_lunch: tuple = ("12:00", "13:00")):
    """Generate random observation times for a work-sampling study.

    Args:
        study_duration_days: Number of days in the study.
        observations_per_day: Target number of observations per day.
        work_start: Shift start time, "HH:MM".
        work_end: Shift end time, "HH:MM" (assumed later than work_start,
            same day).
        exclude_lunch: (start, end) "HH:MM" window excluded from scheduling.

    Returns:
        dict with the per-day schedule and study totals. If rejection
        sampling hits its per-day attempt cap, a day may contain fewer than
        observations_per_day times; "total_observations" reports the
        planned total.
    """
    start_time = datetime.strptime(work_start, "%H:%M")
    end_time = datetime.strptime(work_end, "%H:%M")
    lunch_start = datetime.strptime(exclude_lunch[0], "%H:%M")
    lunch_end = datetime.strptime(exclude_lunch[1], "%H:%M")

    # Minutes in the shift; invariant across days, so hoisted out of the loop.
    total_minutes = (end_time - start_time).seconds // 60

    schedule = []
    for day in range(study_duration_days):
        day_schedule = []

        # Rejection-sample random times, capped at 1000 attempts per day so
        # an over-constrained day cannot loop forever.
        attempts = 0
        while len(day_schedule) < observations_per_day and attempts < 1000:
            attempts += 1
            random_minutes = random.randint(0, total_minutes)
            obs_time = start_time + timedelta(minutes=random_minutes)

            # Skip times inside the lunch window.
            if lunch_start <= obs_time < lunch_end:
                continue

            # Enforce a minimum 10-minute gap between observations.
            # BUG FIX: timedelta.seconds is always non-negative and wraps for
            # negative deltas, so abs((a - b).seconds) failed to detect
            # near-duplicates whenever obs_time preceded an existing time;
            # total_seconds() yields the true signed magnitude.
            if any(abs((obs_time - existing).total_seconds()) < 600
                   for existing in day_schedule):
                continue

            day_schedule.append(obs_time)

        day_schedule.sort()
        schedule.append({
            'day': day + 1,
            'times': [t.strftime("%H:%M") for t in day_schedule]
        })

    return {
        "total_days": study_duration_days,
        "observations_per_day": observations_per_day,
        "total_observations": study_duration_days * observations_per_day,
        "schedule": schedule
    }

3. 活动分析

def analyze_observations(observations: list, categories: list):
    """Summarize work-sampling observations with 95% confidence intervals.

    Args:
        observations: List of observed category labels, one per observation.
        categories: List of all possible category labels.

    Returns:
        dict with per-category counts/proportions/CIs (sorted by proportion,
        descending) and a productive vs. non-productive summary. Categories
        whose name contains "idle" or "delay" (case-insensitive) are
        classified as non-productive.
    """
    total = len(observations)

    # Single O(n) counting pass instead of one O(n) observations.count()
    # call per category (which made the original O(n * k)).
    counts = Counter(observations)

    z = stats.norm.ppf(0.975)  # two-sided z for 95% confidence

    results = []
    for cat in categories:
        count = counts.get(cat, 0)
        p = count / total if total > 0 else 0

        # Standard error of a binomial proportion.
        se = np.sqrt(p * (1 - p) / total) if total > 0 else 0

        # Normal-approximation confidence interval, clipped to [0, 1].
        ci_lower = max(0, p - z * se)
        ci_upper = min(1, p + z * se)

        results.append({
            'category': cat,
            'count': count,
            'proportion': round(p, 4),
            'percentage': round(p * 100, 1),
            'std_error': round(se, 4),
            'ci_95_lower': round(ci_lower * 100, 1),
            'ci_95_upper': round(ci_upper * 100, 1)
        })

    # Largest share of time first.
    results.sort(key=lambda x: x['proportion'], reverse=True)

    def _is_non_productive(name: str) -> bool:
        # Heuristic classification by category name.
        lowered = name.lower()
        return 'idle' in lowered or 'delay' in lowered

    return {
        "total_observations": total,
        "categories": results,
        "summary": {
            "productive": sum(r['proportion'] for r in results
                              if not _is_non_productive(r['category'])) * 100,
            "non_productive": sum(r['proportion'] for r in results
                                  if _is_non_productive(r['category'])) * 100
        }
    }

4. 工作抽样控制图

def create_sampling_control_chart(daily_observations: list,
                                 target_proportion: float = None):
    """Build a p-chart to monitor day-to-day sampling consistency.

    Args:
        daily_observations: List of dicts {'day': int, 'productive': int,
            'total': int}, one entry per study day.
        target_proportion: Accepted for API compatibility but currently
            unused; the center line is always the pooled mean.
            NOTE(review): confirm whether this was meant to override p-bar.

    Returns:
        dict with the center line, per-day chart points with 3-sigma
        control limits, and an overall stability assessment.
    """
    # Pooled proportion across all days serves as the center line.
    productive_sum = sum(entry['productive'] for entry in daily_observations)
    obs_sum = sum(entry['total'] for entry in daily_observations)
    p_bar = productive_sum / obs_sum if obs_sum > 0 else 0

    chart_data = []
    for entry in daily_observations:
        n = entry['total']
        daily_p = entry['productive'] / n if n > 0 else 0

        # Per-day standard error: limits vary with each day's sample size.
        se = np.sqrt(p_bar * (1 - p_bar) / n) if n > 0 else 0

        # 3-sigma limits, clipped to the valid proportion range [0, 1].
        upper = min(1, p_bar + 3 * se)
        lower = max(0, p_bar - 3 * se)

        chart_data.append({
            'day': entry['day'],
            'proportion': round(daily_p, 4),
            'sample_size': n,
            'ucl': round(upper, 4),
            'lcl': round(lower, 4),
            'in_control': lower <= daily_p <= upper
        })

    out_of_control = sum(1 for point in chart_data if not point['in_control'])

    return {
        "center_line": round(p_bar, 4),
        "chart_data": chart_data,
        "out_of_control_days": out_of_control,
        "process_stable": out_of_control == 0,
        "recommendation": "过程稳定" if out_of_control == 0
                         else f"调查{out_of_control}个失控点"
    }

5. 从工作抽样计算标准时间

def calculate_standard_time_from_sampling(sampling_results: dict,
                                         total_study_time_hours: float,
                                         units_produced: int,
                                         allowance_percent: float):
    """Derive a standard time per unit from work-sampling results.

    Args:
        sampling_results: Output of analyze_observations; its
            summary['productive'] percentage is used.
        total_study_time_hours: Total hours covered by the study.
        units_produced: Units completed during the study.
        allowance_percent: PFD (personal/fatigue/delay) allowance, percent.

    Returns:
        dict with productive time, normal time and standard time per unit
        (in minutes), and the implied hourly output rate.
    """
    # Fraction of the study window that was productive.
    productive_fraction = sampling_results['summary']['productive'] / 100
    productive_hours = total_study_time_hours * productive_fraction

    # Normal time per unit, expressed in minutes.
    if units_produced > 0:
        normal_minutes = productive_hours / units_produced * 60
    else:
        normal_minutes = 0

    # Inflate normal time by the PFD allowance to obtain standard time.
    standard_minutes = normal_minutes * (1 + allowance_percent / 100)

    if standard_minutes > 0:
        hourly_rate = round(60 / standard_minutes, 1)
    else:
        hourly_rate = 0

    return {
        "study_duration_hours": total_study_time_hours,
        "productive_proportion": round(productive_fraction, 3),
        "productive_hours": round(productive_hours, 2),
        "units_produced": units_produced,
        "normal_time_per_unit": round(normal_minutes, 3),
        "allowance_percent": allowance_percent,
        "standard_time_per_unit": round(standard_minutes, 3),
        "pieces_per_hour": hourly_rate
    }

6. 多活动研究

def multi_activity_study(observations: list, workers: list, machines: list):
    """Analyze multi-activity work sampling across workers and machines.

    Args:
        observations: List of dicts like {'time': str, 'worker': str,
            'activity': str, 'machine': str, 'machine_state': str}.
            'machine_state' defaults to 'running' when absent.
        workers: Worker identifiers to analyze.
        machines: Machine identifiers to analyze.

    Returns:
        dict with per-worker activity breakdowns, per-machine state
        breakdowns/utilization, and overall utilization averages.

    Note:
        A worker with no 'idle' observations contributes 100% to the average
        utilization, so a worker with zero observations also counts as fully
        utilized — confirm this is the intended treatment.
    """
    total_obs = len(observations)

    # Per-worker activity distribution.
    worker_analysis = {}
    for worker in workers:
        worker_obs = [o for o in observations if o['worker'] == worker]
        n = len(worker_obs)

        activities = {}
        for obs in worker_obs:
            act = obs['activity']
            activities[act] = activities.get(act, 0) + 1

        worker_analysis[worker] = {
            'observations': n,
            'activities': {k: {'count': v, 'percent': round(v / n * 100, 1)}
                         for k, v in activities.items()}
        }

    # Per-machine state distribution; machines with no observations omitted.
    machine_analysis = {}
    for machine in machines:
        machine_obs = [o for o in observations if o.get('machine') == machine]
        n = len(machine_obs)

        states = {}
        for obs in machine_obs:
            state = obs.get('machine_state', 'running')
            states[state] = states.get(state, 0) + 1

        if n > 0:
            machine_analysis[machine] = {
                'observations': n,
                'states': {k: {'count': v, 'percent': round(v / n * 100, 1)}
                          for k, v in states.items()},
                'utilization': round(states.get('running', 0) / n * 100, 1)
            }

    # BUG FIX: guard the worker average the same way the machine average
    # already was — np.mean([]) returns NaN and emits a RuntimeWarning.
    worker_utils = [
        100 - w['activities'].get('idle', {}).get('percent', 0)
        for w in worker_analysis.values()
    ]
    machine_utils = [m['utilization'] for m in machine_analysis.values()]

    return {
        "total_observations": total_obs,
        "worker_analysis": worker_analysis,
        "machine_analysis": machine_analysis,
        "summary": {
            "avg_worker_utilization": np.mean(worker_utils) if worker_utils else 0,
            "avg_machine_utilization": np.mean(machine_utils) if machine_utils else 0
        }
    }

流程集成

此技能与以下流程集成:

  • work-measurement-analysis.js
  • utilization-improvement.js
  • workforce-planning.js

输出格式

{
  "study_summary": {
    "total_observations": 500,
    "study_duration_days": 10,
    "confidence_level": "95%"
  },
  "activity_analysis": {
    "productive": {"percent": 72.5, "ci": [68.5, 76.5]},
    "idle": {"percent": 15.2, "ci": [12.1, 18.3]},
    "delay": {"percent": 12.3, "ci": [9.5, 15.1]}
  },
  "utilization": {
    "worker": 84.8,
    "machine": 78.2
  },
  "standard_time": {
    "normal_time": 2.45,
    "standard_time": 2.82
  },
  "recommendations": [
    "通过更好的调度减少空闲时间",
    "调查延迟原因"
  ]
}

最佳实践

  1. 真正随机的时间 - 使用随机数生成器
  2. 足够的观察次数 - 基于期望精度
  3. 一致的分类 - 培训所有观察员
  4. 简要观察 - 即时记录所见
  5. 告知工人 - 减少霍桑效应
  6. 监控稳定性 - 使用控制图

限制

  • 无法确定活动顺序
  • 需要清晰的类别定义
  • 随机调度可能错过罕见事件
  • 工人行为在被观察时可能改变