ML模型训练
训练机器学习模型涉及选择适当的算法、准备数据和优化模型参数以实现强大的预测性能。
训练阶段
- 数据准备:清洗、编码、归一化
- 特征工程:创建有意义的特征
- 模型选择:选择适当的算法
- 超参数调优:优化模型设置
- 验证:交叉验证和评估指标
- 部署:准备模型用于生产
常见算法
- 回归:线性、岭回归、套索回归、随机森林
- 分类:逻辑回归、SVM、随机森林、梯度提升
- 聚类:K-Means、DBSCAN、层次聚类
- 神经网络:多层感知器、卷积神经网络、循环神经网络、变换器
Python实现
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.preprocessing import StandardScaler
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import (accuracy_score, precision_score, recall_score,
f1_score, confusion_matrix, roc_auc_score)
import torch
import torch.nn as nn
from torch.utils.data import DataLoader, TensorDataset
import tensorflow as tf
from tensorflow import keras
# --- 1. Build a synthetic binary-classification dataset ---
np.random.seed(42)
n_samples = 1000
n_features = 20

# Feature matrix: i.i.d. standard normal values.
X = np.random.randn(n_samples, n_features)
# Label is a noisy linear function of the first three features,
# thresholded at zero -> roughly balanced binary classes.
signal = X[:, 0] + X[:, 1] - X[:, 2] + np.random.randn(n_samples) * 0.5
y = (signal > 0).astype(int)

# Hold out 20% of the rows as the test split.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

# Standardize features: fit the scaler on the training split only,
# then apply the same transform to both splits (no test leakage).
scaler = StandardScaler()
X_train_scaled = scaler.fit_transform(X_train)
X_test_scaled = scaler.transform(X_test)

print("数据集形状:")
print(f"训练: {X_train_scaled.shape}, 测试: {X_test_scaled.shape}")
print(f"类别分布: {np.bincount(y_train)}")
# --- 2. Classical scikit-learn models ---
# Bug fix: the two print calls had literal newlines inside the string
# quotes (a SyntaxError); restored as "\n" escapes. Loop indentation
# restored as well.
print("\n=== Scikit-learn模型 ===")

# Candidate estimators, keyed by their display name.
models = {
    '逻辑回归': LogisticRegression(max_iter=1000),
    '随机森林': RandomForestClassifier(n_estimators=100, random_state=42),
    '梯度提升': GradientBoostingClassifier(n_estimators=100, random_state=42),
}

sklearn_results = {}
for name, model in models.items():
    # Fit on the standardized training split, then score on the test split.
    model.fit(X_train_scaled, y_train)
    y_pred = model.predict(X_test_scaled)
    # Probability of the positive class, needed for ROC-AUC.
    y_pred_proba = model.predict_proba(X_test_scaled)[:, 1]
    sklearn_results[name] = {
        '准确率': accuracy_score(y_test, y_pred),
        '精确率': precision_score(y_test, y_pred),
        '召回率': recall_score(y_test, y_pred),
        'f1': f1_score(y_test, y_pred),
        'roc_auc': roc_auc_score(y_test, y_pred_proba),
    }
    print(f"\n{name}:")
    for metric, value in sklearn_results[name].items():
        print(f" {metric}: {value:.4f}")
# --- 3. PyTorch neural network ---
# Bug fix: the print string was split across physical lines (SyntaxError);
# the intended leading newline is restored as "\n".
print("\n=== PyTorch模型 ===")
class NeuralNetPyTorch(nn.Module):
    """Small fully-connected binary classifier.

    Architecture: input -> 64 -> 32 -> 1 with ReLU activations, dropout
    (p=0.3) after each hidden layer, and a sigmoid output so the single
    output unit is the probability of class 1 (suitable for BCELoss).
    """

    def __init__(self, input_size):
        super().__init__()
        self.fc1 = nn.Linear(input_size, 64)
        self.fc2 = nn.Linear(64, 32)
        self.fc3 = nn.Linear(32, 1)
        self.relu = nn.ReLU()
        # Dropout regularizes both hidden layers; inactive in eval() mode.
        self.dropout = nn.Dropout(0.3)

    def forward(self, x):
        x = self.relu(self.fc1(x))
        x = self.dropout(x)
        x = self.relu(self.fc2(x))
        x = self.dropout(x)
        # Sigmoid squashes the logit to a probability in (0, 1).
        x = torch.sigmoid(self.fc3(x))
        return x
# Device selection: use the GPU when available, otherwise CPU.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
pytorch_model = NeuralNetPyTorch(n_features).to(device)
criterion = nn.BCELoss()  # matches the model's sigmoid output
optimizer = torch.optim.Adam(pytorch_model.parameters(), lr=0.001)

# Wrap the training arrays in a DataLoader for shuffled mini-batches.
# BCELoss expects targets shaped (N, 1), hence the unsqueeze.
train_dataset = TensorDataset(torch.FloatTensor(X_train_scaled),
                              torch.FloatTensor(y_train).unsqueeze(1))
train_loader = DataLoader(train_dataset, batch_size=32, shuffle=True)

# Train the PyTorch model.
# Bug fix: loop-body indentation restored (it had been stripped) and the
# final print's broken string literal repaired with a "\n" escape.
epochs = 50
pytorch_losses = []
for epoch in range(epochs):
    total_loss = 0
    for batch_X, batch_y in train_loader:
        batch_X, batch_y = batch_X.to(device), batch_y.to(device)
        optimizer.zero_grad()
        outputs = pytorch_model(batch_X)
        loss = criterion(outputs, batch_y)
        loss.backward()
        optimizer.step()
        total_loss += loss.item()
    # Record the mean batch loss for this epoch.
    pytorch_losses.append(total_loss / len(train_loader))
    if (epoch + 1) % 10 == 0:
        print(f"Epoch {epoch + 1}/{epochs}, Loss: {pytorch_losses[-1]:.4f}")

# Evaluate: eval() disables dropout; threshold probabilities at 0.5.
pytorch_model.eval()
with torch.no_grad():
    y_pred_pytorch = pytorch_model(torch.FloatTensor(X_test_scaled).to(device))
    y_pred_pytorch = (y_pred_pytorch.cpu().numpy() > 0.5).astype(int).flatten()
print(f"\nPyTorch准确率: {accuracy_score(y_test, y_pred_pytorch):.4f}")
# --- 4. TensorFlow/Keras model (same architecture as the PyTorch net) ---
# Bug fix: the section-header print had a literal newline inside the
# quotes (SyntaxError); restored as a "\n" escape.
print("\n=== TensorFlow/Keras模型 ===")

tf_model = keras.Sequential([
    keras.layers.Dense(64, activation='relu', input_shape=(n_features,)),
    keras.layers.Dropout(0.3),
    keras.layers.Dense(32, activation='relu'),
    keras.layers.Dropout(0.3),
    keras.layers.Dense(1, activation='sigmoid'),
])
tf_model.compile(
    optimizer='adam',
    loss='binary_crossentropy',
    metrics=['accuracy'],
)

# validation_split holds out 20% of the training data for per-epoch
# validation metrics; verbose=0 suppresses the progress bars.
history = tf_model.fit(
    X_train_scaled, y_train,
    batch_size=32,
    epochs=50,
    validation_split=0.2,
    verbose=0,
)

# Threshold the predicted probabilities at 0.5 to obtain class labels.
y_pred_tf = (tf_model.predict(X_test_scaled) > 0.5).astype(int).flatten()
print(f"TensorFlow准确率: {accuracy_score(y_test, y_pred_tf):.4f}")
# --- 5. Visualization ---
fig, axes = plt.subplots(2, 2, figsize=(12, 10))

# Model comparison: test accuracy for every trained model.
# Bug fix: sklearn_results stores metrics under the Chinese key '准确率';
# the original indexed ['accuracy'], which would raise KeyError.
models_names = list(sklearn_results.keys()) + ['PyTorch', 'TensorFlow']
accuracies = ([sklearn_results[m]['准确率'] for m in sklearn_results] +
              [accuracy_score(y_test, y_pred_pytorch),
               accuracy_score(y_test, y_pred_tf)])
axes[0, 0].bar(range(len(models_names)), accuracies, color='steelblue')
axes[0, 0].set_xticks(range(len(models_names)))
axes[0, 0].set_xticklabels(models_names, rotation=45)
axes[0, 0].set_ylabel('准确率')
axes[0, 0].set_title('模型比较')
axes[0, 0].set_ylim([0, 1])

# Training-loss curves for both neural networks.
axes[0, 1].plot(pytorch_losses, label='PyTorch', linewidth=2)
axes[0, 1].plot(history.history['loss'], label='TensorFlow', linewidth=2)
axes[0, 1].set_xlabel('Epoch')
axes[0, 1].set_ylabel('Loss')
axes[0, 1].set_title('训练损失比较')
axes[0, 1].legend()
axes[0, 1].grid(True, alpha=0.3)

# Random-forest metric bars.
# Bug fix: use the keys actually stored in sklearn_results ('准确率' etc.);
# the original English names would raise KeyError.
metrics = ['准确率', '精确率', '召回率', 'f1']
rf_metrics = [sklearn_results['随机森林'][m] for m in metrics]
axes[1, 0].bar(metrics, rf_metrics, color='coral')
axes[1, 0].set_ylabel('分数')
axes[1, 0].set_title('随机森林指标')
axes[1, 0].set_ylim([0, 1])

# TensorFlow training vs. validation accuracy per epoch.
axes[1, 1].plot(history.history['accuracy'], label='Training', linewidth=2)
axes[1, 1].plot(history.history['val_accuracy'], label='Validation', linewidth=2)
axes[1, 1].set_xlabel('Epoch')
axes[1, 1].set_ylabel('Accuracy')
axes[1, 1].set_title('TensorFlow训练历史')
axes[1, 1].legend()
axes[1, 1].grid(True, alpha=0.3)

plt.tight_layout()
plt.savefig('model_training_comparison.png', dpi=100, bbox_inches='tight')
# Bug fix: the two final prints had literal newlines inside the quotes
# (SyntaxError); restored as "\n" escapes.
print("\n可视化保存为'model_training_comparison.png'")
print("\n模型训练完成!")
训练最佳实践
- 数据分割:70/15/15用于训练/验证/测试
- 缩放:训练前归一化特征
- 交叉验证:使用K折进行稳健评估
- 早停:防止过拟合
- 类别平衡:处理不平衡数据集
关键指标
- 准确率:整体正确率
- 精确率:正预测准确率
- 召回率:真正例检测率
- F1分数:精确率/召回率的调和平均
- ROC-AUC:与阈值无关的指标
交付物
- 训练好的模型检查点
- 测试集上的性能指标
- 特征重要性分析
- 学习曲线
- 超参数配置
- 模型评估报告