天池贷款违约预测

  • Post author:
  • Post category:其他


# Load the raw training data and take a first look at it.
data_row = pd.read_csv('D:/天池/贷款违约预测/train.csv')
data_row.head(5)
# Class balance of the target column.
data_row['isDefault'].value_counts()
# All column names as a plain list.
cols_row = list(data_row.columns)
# Columns that still hold text and therefore need numeric encoding.
cat_ = data_row.select_dtypes(include='object').columns

Index(['grade', 'subGrade', 'employmentLength', 'issueDate',
       'earliesCreditLine'],
      dtype='object')

data_row['employmentLength'].unique()
# Map employment-length strings onto year counts; unmapped values (NaN) are
# left untouched by replace().
_emp_len_map = {
    '10+ years': 10, '9 years': 9, '8 years': 8, '7 years': 7, '6 years': 6,
    '5 years': 5, '4 years': 4, '3 years': 3, '2 years': 2, '1 year': 1,
    '< 1 year': 0,
}
data_row['employmentLength'].replace(_emp_len_map, inplace=True)
data_row['employmentLength'].unique()

array([ 2., 5., 8., 10., nan, 7., 9., 1., 3., 0., 4., 6.])

# Encode letter grades numerically. 'A' is the best grade (7) down to 'G' (1);
# sub-grades get a two-digit code: tens digit from the letter (A=0 .. G=6),
# units digit from the sub-level (1-5), e.g. A1 -> 1, E2 -> 42, G5 -> 65.
_grade_letters = 'ABCDEFG'
data_row['grade'].replace(
    {letter: 7 - pos for pos, letter in enumerate(_grade_letters)},
    inplace=True,
)
data_row['subGrade'].replace(
    {f'{letter}{level}': 10 * pos + level
     for pos, letter in enumerate(_grade_letters)
     for level in range(1, 6)},
    inplace=True,
)
# Drop columns that will not be fed to the model: free-form dates,
# the row id, and the two code columns — all removed in one call.
data_row.drop(
    columns=['issueDate', 'earliesCreditLine', 'id', 'postCode', 'regionCode'],
    inplace=True,
)
# 缺失值处理 (missing-value handling)

# Per-column missing counts, largest first.
total = data_row.isnull().sum().sort_values(ascending=False)
# FIX: the share of missing values should be relative to the number of rows,
# not to the grand total of missing cells (the original divided by
# total.sum(), which made 'percent' the share of ALL missing cells).
percent = total / len(data_row)
missing_data = pd.concat([total, percent], axis=1, keys=['total', 'percent'])
missing_data.head(20)
# Forward-fill the remaining gaps. fillna(method='ffill') is deprecated in
# modern pandas; ffill() is the direct equivalent.
# NOTE(review): a NaN in the very first row would survive a forward fill.
data_row.ffill(inplace=True)
data_row.isnull().sum().sort_values(ascending=False)
# 处理异常值

# 均方差检测

def find_outliers(data, fea):
    """Append a ``<fea>_outliers`` column flagging values outside mean ± 3·std.

    Values beyond three (population, ddof=0) standard deviations from the
    mean are labelled '异常值' (outlier); everything else — including NaN —
    is labelled '正常值' (normal). Mutates *data* in place and returns it.
    """
    center = np.mean(data[fea])
    spread = np.std(data[fea])  # np.std => ddof=0, same as the original
    low = center - 3 * spread
    high = center + 3 * spread
    # NaN compares False on both sides, so missing values come out '正常值',
    # exactly as the original row-wise apply() behaved.
    is_outlier = (data[fea] > high) | (data[fea] < low)
    data[fea + '_outliers'] = np.where(is_outlier, '异常值', '正常值')
    return data
# Feature columns = everything except the label.
fea_cols = [col for col in data_row.columns if col != 'isDefault']

# 检测异常值 — flag 3-sigma outliers for every feature on a working copy.
data_train = data_row.copy()
for col in fea_cols:
    data_train = find_outliers(data_train, col)

# 删除异常值 — keep only rows flagged normal on every feature, then drop
# the helper columns and renumber the surviving rows.
flag_cols = [col + '_outliers' for col in fea_cols]
data_train = data_train[(data_train[flag_cols] == '正常值').all(axis=1)]
data_train = data_train.drop(columns=flag_cols)
data_train = data_train.reset_index(drop=True)
# 划分标签列与特征列 — split features from the label.

X = data_train.loc[:, data_train.columns != 'isDefault']
y = data_train['isDefault']
data_train['isDefault'].value_counts()

# 对数据进行下采样 — undersample the majority (non-default) class down to
# the number of default rows.

number_default = len(data_train[data_train['isDefault'] == 1])
default_indices = data_train.index[data_train['isDefault'] == 1].to_numpy()

default_indices
normal_indices = data_train.index[data_train['isDefault'] == 0]

# Draw as many normal rows as there are defaults, without replacement.
random_normal_indices = np.random.choice(normal_indices, number_default, replace=False)
random_normal_indices = np.asarray(random_normal_indices)

# Merge both index sets; data_train was reset_index'ed, so positional iloc
# and the index labels coincide.
under_sample_indices = np.concatenate([default_indices, random_normal_indices])
under_sample_data = data_train.iloc[under_sample_indices, :]

X_undersample = under_sample_data.loc[:, under_sample_data.columns != 'isDefault']
y_undersample = under_sample_data.loc[:, under_sample_data.columns == 'isDefault']

print("Percentage of normal transactions: ", len(under_sample_data[under_sample_data.isDefault == 0])/len(under_sample_data))
print("Percentage of fraud transactions: ", len(under_sample_data[under_sample_data.isDefault == 1])/len(under_sample_data))
print("Total number of transactions in resampled data: ", len(under_sample_data))
# 划分数据集 — train/test split on the undersampled data (the classes are
# balanced by construction, so no stratification is needed).

from sklearn.model_selection import train_test_split

X_train_undersample, X_test_undersample, y_train_undersample, y_test_undersample = train_test_split(
    X_undersample, y_undersample, test_size=0.3, random_state=0)

print("")
print("Number transactions train dataset: ", len(X_train_undersample))
# FIX: corrected the "transcations" typo in the two messages below.
print("Number transactions test dataset: ", len(X_test_undersample))
print("Total number of transactions: ", len(X_train_undersample) + len(X_test_undersample))
# 随机森林分类 — random-forest classifier on the undersampled data.

# FIX: RandomForestClassifier was used but never imported anywhere in the file.
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import roc_auc_score

rf = RandomForestClassifier(random_state=666, n_estimators=50, min_samples_split=8,
                            min_samples_leaf=2, max_depth=8)
# FIX: y_train_undersample is a one-column DataFrame; ravel it to a 1-D array
# to avoid sklearn's DataConversionWarning.
rf.fit(X_train_undersample, y_train_undersample.values.ravel())

# print(rf.oob_score_)

# Probability of the positive (default) class on the held-out rows.
y_predprob = rf.predict_proba(X_test_undersample)[:, 1]
y_pred = rf.predict(X_test_undersample)


# FIX: this AUC is computed on the test split, not the training data —
# the original label said "(Train)".
print("AUC Score (Test): %f" % roc_auc_score(y_test_undersample, y_predprob))



版权声明:本文为weixin_48252774原创文章,遵循 CC 4.0 BY-SA 版权协议,转载请附上原文出处链接和本声明。