# -*- coding:utf-8 -*-
"""
@author: yq
@time: 2023/12/28
@desc: Feature utility functions (monotonicity check, correlation, VIF, KS, PSI).
"""
from typing import Optional

import numpy as np
import pandas as pd
import scorecardpy as sc
from statsmodels.stats.outliers_influence import variance_inflation_factor as vif
# Check whether a list is monotonic, allowing at most `pos_neg_cnt` changes of direction
# (the default of 1 permits a U-shaped or inverted-U-shaped bad-rate curve).
def f_judge_monto(bd_list: list, pos_neg_cnt: int = 1) -> bool:
    start_tr = bd_list[1] - bd_list[0]
    tmp_len = len(bd_list)
    pos_neg_flag = 0
    for i in range(2, tmp_len):
        # Difference between consecutive bad rates; its sign gives the current trend.
        tmp_tr = bd_list[i] - bd_list[i - 1]
        if (tmp_tr >= 0 and start_tr >= 0) or (tmp_tr <= 0 and start_tr <= 0):
            # Trend unchanged, move on to the next point.
            continue
        else:
            # Record one change of direction.
            start_tr = tmp_tr
            pos_neg_flag += 1
            if pos_neg_flag > pos_neg_cnt:
                return False
    # The list satisfies the trend requirement.
    return pos_neg_flag <= pos_neg_cnt
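# Quick illustration with made-up bad rates: one change of direction (a U-shape) passes
# under the default pos_neg_cnt=1, a second one does not.
#   f_judge_monto([0.08, 0.05, 0.03, 0.04, 0.06])  # -> True  (one turn)
#   f_judge_monto([0.08, 0.05, 0.07, 0.04, 0.06])  # -> False (two turns)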

def f_get_corr(data: pd.DataFrame, meth: str = 'spearman') -> pd.DataFrame:
    # Pairwise correlation matrix of the features (Spearman by default).
    return data.corr(method=meth)


def f_get_ivf(data: pd.DataFrame) -> Optional[pd.DataFrame]:
    # Variance inflation factor per feature; with a single column there is nothing to
    # compute, so return None.
    if len(data.columns.to_list()) <= 1:
        return None
    vif_v = [vif(data.values, data.columns.get_loc(i)) for i in data.columns]
    vif_df = pd.DataFrame()
    vif_df["变量"] = data.columns
    vif_df['vif'] = vif_v
    return vif_df

def f_calcu_model_ks(data, y_column, sort_ascending):
    # Aggregate total and bad sample counts per score bin, ordered by bin.
    var_ks = data.groupby('MODEL_SCORE_BIN')[y_column].agg(['size', 'sum']).sort_index(ascending=sort_ascending)
    var_ks.columns = ['样本数', '坏样本数']
    var_ks['好样本数'] = var_ks['样本数'] - var_ks['坏样本数']
    var_ks['坏样本比例'] = (var_ks['坏样本数'] / var_ks['样本数']).round(4)
    var_ks['样本数比例'] = (var_ks['样本数'] / var_ks['样本数'].sum()).round(4)
    var_ks['总坏样本数'] = var_ks['坏样本数'].sum()
    var_ks['总好样本数'] = var_ks['好样本数'].sum()
    var_ks['平均坏样本率'] = (var_ks['总坏样本数'] / var_ks['样本数'].sum()).round(4)
    var_ks['累计坏样本数'] = var_ks['坏样本数'].cumsum()
    var_ks['累计好样本数'] = var_ks['好样本数'].cumsum()
    var_ks['累计样本数'] = var_ks['样本数'].cumsum()
    var_ks['累计坏样本比例'] = (var_ks['累计坏样本数'] / var_ks['总坏样本数']).round(4)
    var_ks['累计好样本比例'] = (var_ks['累计好样本数'] / var_ks['总好样本数']).round(4)
    # Per-bin KS: gap between cumulative bad and cumulative good proportions.
    var_ks['KS'] = (var_ks['累计坏样本比例'] - var_ks['累计好样本比例']).round(4)
    # LIFT: cumulative bad rate relative to the overall bad rate.
    var_ks['LIFT'] = ((var_ks['累计坏样本数'] / var_ks['累计样本数']) / var_ks['平均坏样本率']).round(4)
    return var_ks.reset_index()
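# Note: the overall model KS is conventionally read off as the maximum absolute value of
# the per-bin KS column returned above, e.g.
#   f_calcu_model_ks(df, 'y', sort_ascending=True)['KS'].abs().max()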

def f_get_model_score_bin(df, card, bins=None):
    # Score the sample with the scorecardpy card (adds/overwrites the 'score' column on df).
    train_score = sc.scorecard_ply(df, card, print_step=0)
    df['score'] = train_score
    if bins is None:
        # Derive decile cut points from the current sample, then widen the outer edges so
        # out-of-range scores still fall into a bin.
        _, bins = pd.qcut(df['score'], q=10, retbins=True, duplicates="drop")
        bins = list(bins)
        bins[0] = -np.inf
        bins[-1] = np.inf
    score_bins = pd.cut(df['score'], bins=bins)
    df['MODEL_SCORE_BIN'] = score_bins.astype(str).values
    return df, bins
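# Presumably the training set is binned first and its returned `bins` are passed back in
# for the test set, so that f_calcu_model_psi compares the two samples on identical cuts.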

def f_calcu_model_psi(df_train, df_test):
    # Bin-level sample shares for both samples (they must be binned on the same cut points).
    tmp1 = df_train.groupby('MODEL_SCORE_BIN')['MODEL_SCORE_BIN'].agg(['count']).sort_index(ascending=True)
    tmp1['样本数比例'] = (tmp1['count'] / tmp1['count'].sum()).round(4)
    tmp2 = df_test.groupby('MODEL_SCORE_BIN')['MODEL_SCORE_BIN'].agg(['count']).sort_index(ascending=True)
    tmp2['样本数比例'] = (tmp2['count'] / tmp2['count'].sum()).round(4)
    # Per-bin PSI contribution: (p_train - p_test) * ln(p_train / p_test).
    psi = ((tmp1['样本数比例'] - tmp2['样本数比例']) * np.log(tmp1['样本数比例'] / tmp2['样本数比例'])).round(4)
    psi = psi.reset_index()
    psi = psi.rename(columns={"样本数比例": "psi"})
    psi['训练样本数'] = list(tmp1['count'])
    psi['测试样本数'] = list(tmp2['count'])
    psi['训练样本数比例'] = list(tmp1['样本数比例'])
    psi['测试样本数比例'] = list(tmp2['样本数比例'])
    return psi
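

if __name__ == "__main__":
    # Minimal usage sketch on randomly generated data (a hypothetical example, not part of
    # the production pipeline): it only exercises the correlation/VIF helpers and the
    # KS/PSI calculations. In real use, 'score' and 'MODEL_SCORE_BIN' would come from
    # f_get_model_score_bin with a fitted scorecardpy card.
    rng = np.random.default_rng(0)

    # Correlation matrix and VIF on three synthetic features.
    feats = pd.DataFrame(rng.normal(size=(500, 3)), columns=["x1", "x2", "x3"])
    print(f_get_corr(feats))
    print(f_get_ivf(feats))

    # Synthetic scored sample with a random binary label, binned into deciles.
    sample = pd.DataFrame({
        "y": rng.integers(0, 2, size=1000),
        "score": rng.normal(600, 50, size=1000),
    })
    sample["MODEL_SCORE_BIN"] = pd.qcut(sample["score"], q=10).astype(str)
    print(f_calcu_model_ks(sample, "y", sort_ascending=True))
    print(f_calcu_model_psi(sample.iloc[:500], sample.iloc[500:]))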