Python中KFold不可迭代的问题该如何解决?求大佬指点

主要涉及代码: import pandas as pd import numpy as np from scipy.stats import skew import xgboost as xgb from sklearn.model_selection import KFold from sklearn.ensemble import ExtraTreesRegressor from sklearn.ensemble import RandomForestRegressor from sklearn.metrics import mean_squared_error from sklearn.linear_model import Ridge, RidgeCV, ElasticNet, LassoCV, Lasso from math import sqrt

# Pipeline constants.
TARGET = 'SalePrice'
NFOLDS = 5
SEED = 0
NROWS = None
SUBMISSION_FILE = 'sample_submission.csv'

# Load the data
train = pd.read_csv("E:/数据集 /data/home/aistudio/data/data9072/housingPrices_train.csv")
test = pd.read_csv("E:/数据集 /data/home/aistudio/data/data9072/housingPrices_test.csv")

ntrain = train.shape[0]
ntest = test.shape[0]

# Preprocessing
# Log-transform the target to reduce its skew.
y_train = np.log(train[TARGET] + 1)

train.drop([TARGET], axis=1, inplace=True)

# Stack train and test features so that dummy encoding and imputation
# are applied consistently to both.
all_data = pd.concat((train.loc[:, 'MSSubClass':'SaleCondition'],
                      test.loc[:, 'MSSubClass':'SaleCondition']))

# log transform skewed numeric features:
numeric_feats = all_data.dtypes[all_data.dtypes != "object"].index

# NOTE(review): skewness is measured on the training rows only, then the
# transform is applied to all_data — presumably intentional; confirm.
skewed_feats = train[numeric_feats].apply(lambda x: skew(x.dropna()))  # compute skewness
skewed_feats = skewed_feats[skewed_feats > 0.75]
skewed_feats = skewed_feats.index

all_data[skewed_feats] = np.log1p(all_data[skewed_feats])

all_data = pd.get_dummies(all_data)

# filling NA's with the mean of the column:
all_data = all_data.fillna(all_data.mean())

# creating matrices for sklearn:
x_train = np.array(all_data[:train.shape[0]])
x_test = np.array(all_data[train.shape[0]:])

##交叉采样## kf = KFold(ntrain, shuffle=True, random_state=SEED)

class SklearnWrapper(object):
    """Thin adapter giving sklearn regressors a uniform train/predict API.

    Parameters
    ----------
    clf : estimator class (e.g. Ridge, Lasso, RandomForestRegressor)
    seed : int, injected into the constructor kwargs as 'random_state'
    params : dict of keyword arguments for the estimator constructor
    """

    def __init__(self, clf, seed=0, params=None):
        # BUG FIX: the original defined `init`, not `__init__`, so the
        # constructor body never ran and instantiation with keyword
        # arguments raised a TypeError.  Also copy `params` so the
        # caller's dict is not mutated, and tolerate params=None.
        params = dict(params) if params is not None else {}
        params['random_state'] = seed
        self.clf = clf(**params)

    def train(self, x_train, y_train):
        """Fit the wrapped estimator."""
        self.clf.fit(x_train, y_train)

    def predict(self, x):
        """Predict with the fitted estimator."""
        return self.clf.predict(x)

class XgbWrapper(object):
    """Adapter giving xgboost the same train/predict API as SklearnWrapper.

    Parameters
    ----------
    seed : int, stored into the booster params as 'seed'
    params : dict of xgboost params; the optional 'nrounds' key
        (default 250) sets the number of boosting rounds.
    """

    def __init__(self, seed=0, params=None):
        # BUG FIX: the original defined `init`, not `__init__`, so the
        # constructor body never ran.  Copy `params` so popping
        # 'nrounds' does not mutate the caller's dict; tolerate None.
        self.param = dict(params) if params is not None else {}
        self.param['seed'] = seed
        self.nrounds = self.param.pop('nrounds', 250)

    def train(self, x_train, y_train):
        """Train the booster on (x_train, y_train)."""
        dtrain = xgb.DMatrix(x_train, label=y_train)
        self.gbdt = xgb.train(self.param, dtrain, self.nrounds)

    def predict(self, x):
        """Predict with the trained booster."""
        return self.gbdt.predict(xgb.DMatrix(x))

def get_oof(clf):
    """Compute out-of-fold (OOF) predictions for one wrapped model.

    Trains `clf` on NFOLDS-1 folds and predicts the held-out fold, so
    every training row gets a prediction from a model that never saw it;
    test-set predictions are averaged over the folds.

    Relies on module-level kf, x_train, y_train, x_test, ntrain, ntest,
    NFOLDS.

    Returns
    -------
    (oof_train, oof_test) : two (n, 1) numpy arrays — OOF predictions on
        the training set and mean-over-folds predictions on the test set.
    """
    oof_train = np.zeros((ntrain,))
    oof_test = np.zeros((ntest,))
    oof_test_skf = np.empty((NFOLDS, ntest))

    # BUG FIX: a KFold object is not iterable in sklearn >= 0.18
    # ("TypeError: 'KFold' object is not iterable").  The index pairs
    # come from kf.split(x_train); they must also be unpacked as a
    # tuple inside enumerate().
    for i, (train_index, test_index) in enumerate(kf.split(x_train)):
        x_tr = x_train[train_index]
        y_tr = y_train[train_index]
        x_te = x_train[test_index]

        clf.train(x_tr, y_tr)

        oof_train[test_index] = clf.predict(x_te)
        oof_test_skf[i, :] = clf.predict(x_test)

    oof_test[:] = oof_test_skf.mean(axis=0)
    return oof_train.reshape(-1, 1), oof_test.reshape(-1, 1)

# Hyper-parameters for each base model of the stacking ensemble.
et_params = {
    'n_jobs': 16,
    'n_estimators': 100,
    'max_features': 0.5,
    'max_depth': 12,
    'min_samples_leaf': 2,
}

rf_params = {
    'n_jobs': 16,
    'n_estimators': 100,
    'max_features': 0.2,
    'max_depth': 12,
    'min_samples_leaf': 2,
}

xgb_params = {
    'seed': 0,
    'colsample_bytree': 0.7,
    'silent': 1,
    'subsample': 0.7,
    'learning_rate': 0.075,
    'objective': 'reg:linear',
    'max_depth': 4,
    'num_parallel_tree': 1,
    'min_child_weight': 1,
    'eval_metric': 'rmse',
    'nrounds': 500,
}

rd_params = {'alpha': 10}

ls_params = {'alpha': 0.005}

# Wrap every base model behind the common train/predict interface.
xg = XgbWrapper(seed=SEED, params=xgb_params)
et = SklearnWrapper(clf=ExtraTreesRegressor, seed=SEED, params=et_params)
rf = SklearnWrapper(clf=RandomForestRegressor, seed=SEED, params=rf_params)
rd = SklearnWrapper(clf=Ridge, seed=SEED, params=rd_params)
ls = SklearnWrapper(clf=Lasso, seed=SEED, params=ls_params)

# Out-of-fold predictions for each base model.
xg_oof_train, xg_oof_test = get_oof(xg)
et_oof_train, et_oof_test = get_oof(et)
rf_oof_train, rf_oof_test = get_oof(rf)
rd_oof_train, rd_oof_test = get_oof(rd)
ls_oof_train, ls_oof_test = get_oof(ls)

报错如下: 153 ls = SklearnWrapper(clf=Lasso, seed=SEED, params=ls_params) 154 --> 155 xg_oof_train, xg_oof_test = get_oof(xg) 156 et_oof_train, et_oof_test = get_oof(et) 157 rf_oof_train, rf_oof_test = get_oof(rf)

<ipython-input-22-f597c93e7ccc> in get_oof(clf) 91 oof_test_skf = np.empty((NFOLDS, ntest)) 92 ---> 93 for i, train_index, test_index in enumerate(kf): 94 x_tr = x_train[train_index] 95 y_tr = y_train[train_index]</ipython-input-22-f597c93e7ccc>

TypeError: 'KFold' object is not iterable


Python中KFold不可迭代的问题该如何解决?求大佬指点

2 回复

遇到KFold不可迭代的问题,通常是因为直接对KFold对象进行了迭代操作。根本原因在于scikit-learn 0.18起的API变更:旧版`sklearn.cross_validation.KFold(n, n_folds)`的第一个参数是样本数、对象本身可迭代;而新版`sklearn.model_selection.KFold(n_splits)`的第一个参数是折数,对象本身只是一个交叉验证拆分器,必须调用`split(X)`并传入数据才能生成索引。你代码里的`KFold(ntrain, shuffle=True, random_state=SEED)`正是照旧版API写的,既把样本数误当成了折数,又直接迭代了拆分器。

最常见的情况是直接尝试for train, test in kf:,但kf只是一个KFold实例。正确的做法是调用它的split()方法:

from sklearn.model_selection import KFold
import numpy as np

# sample data
X = np.array([[1, 2], [3, 4], [5, 6], [7, 8], [9, 10]])
y = np.array([0, 1, 0, 1, 0])

# build the KFold splitter
kf = KFold(n_splits=3, shuffle=True, random_state=42)

# correct usage: iterate the generator returned by split()
for tr_idx, te_idx in kf.split(X):
    print(f"训练索引: {tr_idx}")
    print(f"测试索引: {te_idx}")

    # slice out the rows belonging to this fold
    X_train, X_test = X[tr_idx], X[te_idx]
    y_train, y_test = y[tr_idx], y[te_idx]

如果你想要更简洁的写法,也可以这样:

# iterate directly over the result of split()
for train_idx, test_idx in kf.split(X, y):
    # your training/testing code goes here
    pass

关键点:KFold对象本身不可迭代,必须调用split()方法才能获得可迭代的索引对。

检查一下你是不是漏了.split()


我复制的时候不是这样子的 大家可以先看报错部分

回到顶部