feature_utils.py 6.0 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162
  1. # -*- coding:utf-8 -*-
  2. """
  3. @author: yq
  4. @time: 2023/12/28
  5. @desc: 特征工具类
  6. """
  7. import numpy as np
  8. import pandas as pd
  9. import scorecardpy as sc
  10. from statsmodels.stats.outliers_influence import variance_inflation_factor as vif
  11. FORMAT_DICT = {
  12. # 比例类 -1 - 1
  13. "bin_rate1": np.arange(-1, 1 + 0.1, 0.1),
  14. # 次数类1 0 -10
  15. "bin_cnt1": np.arange(0, 11, 1),
  16. # 次数类2 0 - 20
  17. "bin_cnt2": [0, 1, 2, 3, 4, 5, 8, 10, 15, 20],
  18. # 次数类3 0 - 50
  19. "bin_cnt3": [0, 2, 4, 6, 8, 10, 15, 20, 25, 30, 35, 40, 45, 50],
  20. # 次数类4 0 - 100
  21. "bin_cnt4": [0, 3, 6, 10, 15, 20, 30, 40, 50, 100],
  22. # 金额类1 0 - 1w
  23. "bin_amt1": np.arange(0, 1.1e4, 1e3),
  24. # 金额类2 0 - 5w
  25. "bin_amt2": np.arange(0, 5.5e4, 5e3),
  26. # 金额类3 0 - 10w
  27. "bin_amt3": np.arange(0, 11e4, 1e4),
  28. # 金额类4 0 - 20w
  29. "bin_amt4": [0, 1e4, 2e4, 3e4, 4e4, 5e4, 8e4, 10e4, 15e4, 20e4],
  30. # 金额类5 0 - 100w
  31. "bin_amt5": [0, 5e4, 10e4, 15e4, 20e4, 25e4, 30e4, 40e4, 50e4, 100e4],
  32. # 年龄类
  33. "bin_age": [20, 25, 30, 35, 40, 45, 50, 55, 60, 65],
  34. }
  35. # 粗分箱
  36. def f_format_bin(data_describe: pd.Series, raw_v):
  37. percent10 = data_describe["10%"]
  38. percent90 = data_describe["90%"]
  39. format_v = raw_v
  40. # 筛选最合适的标准化分箱节点
  41. bin = None
  42. for k, v_list in FORMAT_DICT.items():
  43. bin_min = min(v_list)
  44. bin_max = max(v_list)
  45. if percent10 >= bin_min and percent90 <= bin_max:
  46. if bin is None:
  47. bin = (k, bin_max)
  48. elif bin[1] > bin_max:
  49. bin = (k, bin_max)
  50. if bin is None:
  51. return format_v
  52. # 选择分箱内适合的切分点
  53. v_list = FORMAT_DICT[bin[0]]
  54. for idx in range(1, len(v_list)):
  55. v_left = v_list[idx - 1]
  56. v_right = v_list[idx]
  57. # 就近原则
  58. if v_left <= raw_v <= v_right:
  59. format_v = v_right if (raw_v - v_left) - (v_right - raw_v) > 0 else v_left
  60. if format_v not in v_list:
  61. if format_v > v_list[-1]:
  62. format_v = v_list[-1]
  63. if format_v < v_list[0]:
  64. format_v = v_list[0]
  65. return format_v
  66. # 此函数判断list的单调性,允许至多N次符号变化
  67. def f_judge_monto(bd_list: list, pos_neg_cnt: int = 1) -> bool:
  68. if len(bd_list) < 2:
  69. return True
  70. start_tr = bd_list[1] - bd_list[0]
  71. tmp_len = len(bd_list)
  72. pos_neg_flag = 0
  73. for i in range(2, tmp_len):
  74. tmp_tr = bd_list[i] - bd_list[i - 1]
  75. # 后一位bad_rate减前一位bad_rate,保证bad_rate的单调性
  76. # 记录符号变化, 允许 最多一次符号变化,即U型分布
  77. if (tmp_tr >= 0 and start_tr >= 0) or (tmp_tr <= 0 and start_tr <= 0):
  78. # 满足趋势保持,查看下一位
  79. continue
  80. else:
  81. # 记录一次符号变化
  82. start_tr = tmp_tr
  83. pos_neg_flag += 1
  84. if pos_neg_flag > pos_neg_cnt:
  85. return False
  86. # 记录满足趋势要求的变量
  87. if pos_neg_flag <= pos_neg_cnt:
  88. return True
  89. return False
  90. def f_get_corr(data: pd.DataFrame, meth: str = 'spearman') -> pd.DataFrame:
  91. return data.corr(method=meth)
  92. def f_get_ivf(data: pd.DataFrame) -> pd.DataFrame:
  93. if len(data.columns.to_list()) <= 1:
  94. return None
  95. vif_v = [vif(data.values, data.columns.get_loc(i)) for i in data.columns]
  96. vif_df = pd.DataFrame()
  97. vif_df["变量"] = data.columns
  98. vif_df['vif'] = vif_v
  99. return vif_df
  100. def f_calcu_model_ks(data, y_column, sort_ascending):
  101. var_ks = data.groupby('MODEL_SCORE_BIN')[y_column].agg([len, np.sum]).sort_index(ascending=sort_ascending)
  102. var_ks.columns = ['样本数', '坏样本数']
  103. var_ks['好样本数'] = var_ks['样本数'] - var_ks['坏样本数']
  104. var_ks['坏样本比例'] = (var_ks['坏样本数'] / var_ks['样本数']).round(4)
  105. var_ks['样本数比例'] = (var_ks['样本数'] / var_ks['样本数'].sum()).round(4)
  106. var_ks['总坏样本数'] = var_ks['坏样本数'].sum()
  107. var_ks['总好样本数'] = var_ks['好样本数'].sum()
  108. var_ks['平均坏样本率'] = (var_ks['总坏样本数'] / var_ks['样本数'].sum()).round(4)
  109. var_ks['累计坏样本数'] = var_ks['坏样本数'].cumsum()
  110. var_ks['累计好样本数'] = var_ks['好样本数'].cumsum()
  111. var_ks['累计样本数'] = var_ks['样本数'].cumsum()
  112. var_ks['累计坏样本比例'] = (var_ks['累计坏样本数'] / var_ks['总坏样本数']).round(4)
  113. var_ks['累计好样本比例'] = (var_ks['累计好样本数'] / var_ks['总好样本数']).round(4)
  114. var_ks['KS'] = (var_ks['累计坏样本比例'] - var_ks['累计好样本比例']).round(4)
  115. var_ks['LIFT'] = ((var_ks['累计坏样本数'] / var_ks['累计样本数']) / var_ks['平均坏样本率']).round(4)
  116. return var_ks.reset_index()
def f_get_model_score_bin(df, card, bins=None):
    """Score *df* with a scorecardpy card and attach score-decile bins.

    NOTE(review): mutates *df* in place (adds 'score' and 'MODEL_SCORE_BIN'
    columns) and also returns it.

    :param df: feature DataFrame accepted by sc.scorecard_ply — TODO confirm
        expected schema against callers
    :param card: scorecard produced by scorecardpy — presumably sc.scorecard
        output; verify
    :param bins: optional pre-computed cut points (e.g. from a training run);
        when None, decile cut points are derived from df's own scores
    :return: tuple (df, bins) — the scored frame and the cut points used
    """
    train_score = sc.scorecard_ply(df, card, print_step=0)
    df['score'] = train_score
    if bins is None:
        # Learn decile cut points from the current scores; duplicate edges
        # are dropped, so fewer than 10 bins may result.
        _, bins = pd.qcut(df['score'], q=10, retbins=True, duplicates="drop")
        bins = list(bins)
        # Open the outer edges so out-of-range scores still fall in a bin.
        bins[0] = -np.inf
        bins[-1] = np.inf
    score_bins = pd.cut(df['score'], bins=bins)
    # Stored as strings (interval labels), matching what the KS/PSI helpers
    # group on.
    df['MODEL_SCORE_BIN'] = score_bins.astype(str).values
    return df, bins
  128. def f_calcu_model_psi(df_train, df_test):
  129. tmp1 = df_train.groupby('MODEL_SCORE_BIN')['MODEL_SCORE_BIN'].agg(['count']).sort_index(ascending=True)
  130. tmp1['样本数比例'] = (tmp1['count'] / tmp1['count'].sum()).round(4)
  131. tmp2 = df_test.groupby('MODEL_SCORE_BIN')['MODEL_SCORE_BIN'].agg(['count']).sort_index(ascending=True)
  132. tmp2['样本数比例'] = (tmp2['count'] / tmp2['count'].sum()).round(4)
  133. psi = ((tmp1['样本数比例'] - tmp2['样本数比例']) * np.log(tmp1['样本数比例'] / tmp2['样本数比例'])).round(4)
  134. psi = psi.reset_index()
  135. psi = psi.rename(columns={"样本数比例": "psi"})
  136. psi['训练样本数'] = list(tmp1['count'])
  137. psi['测试样本数'] = list(tmp2['count'])
  138. psi['训练样本数比例'] = list(tmp1['样本数比例'])
  139. psi['测试样本数比例'] = list(tmp2['样本数比例'])
  140. return psi