# -*- coding:utf-8 -*-
"""
@author: yq
@time: 2024/1/2
@desc: IV and monotonicity based feature filtering strategy
"""
import json
from itertools import combinations_with_replacement
from typing import List, Dict, Tuple

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scorecardpy as sc
import seaborn as sns
from pandas.core.dtypes.common import is_numeric_dtype
from tqdm import tqdm

from commom import f_display_images_by_side, NumpyEncoder
from entitys import DataSplitEntity, CandidateFeatureEntity, DataPreparedEntity, DataFeatureEntity, MetricFucEntity
from .feature_utils import f_judge_monto, f_get_corr, f_get_ivf, f_format_bin, f_monto_contrast
from .filter_strategy_base import FilterStrategyBase


class StrategyIv(FilterStrategyBase):
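    # Filter strategy that ranks and selects features by Information Value (IV)
    # and bad-rate monotonicity, with correlation-based pruning.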

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def _f_get_iv_by_bins(self, bins) -> pd.DataFrame:
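        # Collapse a scorecardpy bins dict into a two-column DataFrame
        # (variable, total IV), sorted by IV descending.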
        iv = {key_: [round(value_['total_iv'].max(), 4)] for key_, value_ in bins.items()}
        iv = pd.DataFrame.from_dict(iv, orient='index', columns=['IV']).reset_index()
        iv = iv.sort_values('IV', ascending=False).reset_index(drop=True)
        iv.columns = ['变量', 'IV']
        return iv

    def _f_get_var_corr_image(self, train_woe):
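        # Draw a correlation heatmap of the WOE-encoded variables and save it;
        # returns the image path, or None when there is at most one variable.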
        if len(train_woe.columns.to_list()) <= 1:
            return None
        train_corr = f_get_corr(train_woe)
        plt.figure(figsize=(12, 12))
        sns.heatmap(train_corr, vmax=1, square=True, cmap='RdBu', annot=True)
        plt.title('Variables Correlation', fontsize=15)
        plt.yticks(rotation=0)
        plt.xticks(rotation=90)
        path = self.data_process_config.f_get_save_path("var_corr.png")
        plt.savefig(path)
        return path

    def _f_save_var_trend(self, bins, x_columns_candidate, prefix):
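        # Save a scorecardpy woebin trend plot per candidate column as
        # "{prefix}_{column}.png" and return the list of image paths.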
        image_path_list = []
        for k in x_columns_candidate:
            bin_df = bins[k]
            # bin_df["bin"] = bin_df["bin"].apply(lambda x: re.sub(r"(\d+\.\d+)",
            #                                     lambda m: "{:.2f}".format(float(m.group(0))), x))
            sc.woebin_plot(bin_df)
            path = self.data_process_config.f_get_save_path(f"{prefix}_{k}.png")
            plt.savefig(path)
            image_path_list.append(path)
        return image_path_list

    def _f_get_bins_by_breaks(self, data: pd.DataFrame, candidate_dict: Dict[str, CandidateFeatureEntity],
                              y_column=None):
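        # Re-bin `data` with scorecardpy using the break points carried by each
        # candidate feature (defaults to the configured y column).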
        y_column = self.data_process_config.y_column if y_column is None else y_column
        special_values = self.data_process_config.special_values
        x_columns_candidate = list(candidate_dict.keys())
        breaks_list = {}
        for column, candidate in candidate_dict.items():
            breaks_list[column] = candidate.breaks_list
        bins = sc.woebin(data[x_columns_candidate + [y_column]], y=y_column, breaks_list=breaks_list,
                         special_values=special_values, print_info=False)
        return bins

    def _f_corr_filter(self, data: DataSplitEntity, candidate_dict: Dict[str, CandidateFeatureEntity]) -> List[str]:
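        # For every variable pair whose WOE correlation reaches corr_threshold,
        # drop the lower-IV side, unless that variable has user-specified breaks.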
        # Remove correlated variables
        corr_threshold = self.data_process_config.corr_threshold
        breaks_list = self.data_process_config.breaks_list
        train_data = data.train_data
        x_columns_candidate = list(candidate_dict.keys())
        bins = self._f_get_bins_by_breaks(train_data, candidate_dict)
        train_woe = sc.woebin_ply(train_data[x_columns_candidate], bins, print_info=False)
        corr_df = f_get_corr(train_woe)
        corr_dict = corr_df.to_dict()
        for column, corr in corr_dict.items():
            column = column.replace("_woe", "")
            if column not in x_columns_candidate:
                continue
            for challenger_column, challenger_corr in corr.items():
                challenger_column = challenger_column.replace("_woe", "")
                if challenger_corr < corr_threshold or column == challenger_column \
                        or challenger_column not in x_columns_candidate:
                    continue
                iv_max = candidate_dict[column].iv_max
                challenger_iv_max = candidate_dict[challenger_column].iv_max
                if iv_max > challenger_iv_max:
                    if challenger_column not in breaks_list.keys():
                        x_columns_candidate.remove(challenger_column)
                else:
                    if column not in breaks_list.keys():
                        x_columns_candidate.remove(column)
                        # The current column is gone; stop comparing it
                        break
        return x_columns_candidate

    def _f_wide_filter(self, data: DataSplitEntity) -> Dict:
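        # Coarse screen: bin each candidate on the training set (at most 5 bins),
        # apply the learned breaks to the test set, and keep variables whose
        # training IV reaches iv_threshold_wide.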
        # Coarse variable screening
        train_data = data.train_data
        test_data = data.test_data
        special_values = self.data_process_config.special_values
        breaks_list = self.data_process_config.breaks_list.copy()
        y_column = self.data_process_config.y_column
        iv_threshold_wide = self.data_process_config.iv_threshold_wide
        x_columns_candidate = self.data_process_config.x_columns_candidate
        if x_columns_candidate is None or len(x_columns_candidate) == 0:
            x_columns_candidate = train_data.columns.tolist()
        if y_column in x_columns_candidate:
            x_columns_candidate.remove(y_column)
        bins_train = sc.woebin(train_data[x_columns_candidate + [y_column]], y=y_column, bin_num_limit=5,
                               special_values=special_values, breaks_list=breaks_list, print_info=False)
        for column, bin_df in bins_train.items():
            breaks_list[column] = list(bin_df['breaks'])
        bins_test = None
        if test_data is not None and len(test_data) != 0:
            bins_test = sc.woebin(test_data[x_columns_candidate + [y_column]], y=y_column,
                                  special_values=special_values, breaks_list=breaks_list, print_info=False)
        bins_iv_dict = {}
        for column, bin_train in bins_train.items():
            train_iv = bin_train['total_iv'][0]
            test_iv = 0
            if bins_test is not None:
                bin_test = bins_test[column]
                test_iv = bin_test['total_iv'][0]
            iv_max = train_iv + test_iv
            if train_iv < iv_threshold_wide:
                continue
            bins_iv_dict[column] = {"iv_max": iv_max, "breaks_list": breaks_list[column]}
        return bins_iv_dict

    def _f_get_best_bins_numeric(self, data: DataSplitEntity, x_column: str):
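        # Greedy search over candidate split points for a monotonic binning that
        # maximizes train IV + test IV; also records the best split points found
        # for each bin count from 2 to 5.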
        # Greedily search for the monotonic binning with the highest combined
        # IV over the training and test sets
        interval = self.data_process_config.bin_search_interval
        iv_threshold = self.data_process_config.iv_threshold
        special_values = self.data_process_config.get_special_values(x_column)
        breaks_list = self.data_process_config.get_breaks_list(x_column)
        y_column = self.data_process_config.y_column
        sample_rate = self.data_process_config.sample_rate
        format_bin = self.data_process_config.format_bin
        pos_neg_cnt = self.data_process_config.pos_neg_cnt
        monto_contrast_change_cnt = self.data_process_config.monto_contrast_change_cnt

        def _n0(x):
            return sum(x == 0)

        def _n1(x):
            return sum(x == 1)

        def _f_distribute_balls(balls, boxes):
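            # Stars-and-bars style enumeration: all ways to split `balls` items
            # into `boxes` non-empty ordered groups, e.g.
            # _f_distribute_balls(4, 2) -> [[1, 3], [2, 2], [3, 1]]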
            # Enumerate placements of boxes - 1 dividers among the available
            # positions, then keep only splits that leave every box non-empty
            total_ways = combinations_with_replacement(range(balls + boxes - 1), boxes - 1)
            distribute_list = []
            # Walk every possible divider placement
            for combo in total_ways:
                # Convert divider positions into per-box ball counts
                distribution = [0] * boxes
                start = 0
                for i, divider in enumerate(combo):
                    distribution[i] = divider - start + 1
                    start = divider + 1
                distribution[-1] = balls - start  # balls left for the last box
                # Keep the split only if every box got at least one ball
                if all(x > 0 for x in distribution):
                    distribute_list.append(distribution)
            return distribute_list

        def _get_sv_bins(df, x_column, y_column, special_values):
            # Build one single-value bin per special value present in the data
            sv_bin_list = []
            for special in special_values:
                dtm = df[df[x_column] == special].copy()
                if len(dtm) != 0:
                    dtm['bin'] = [str(special)] * len(dtm)
                    binning = dtm.groupby(['bin'], group_keys=False)[y_column].agg(
                        [_n0, _n1]).reset_index().rename(columns={'_n0': 'good', '_n1': 'bad'})
                    binning['is_special_values'] = [True] * len(binning)
                    sv_bin_list.append(binning)
            return sv_bin_list

        def _get_bin_left_value(bin_label: str):
            if "," not in bin_label:
                return float(bin_label)
            left = bin_label.split(",")[0]
            return float(left[1:])

        def _get_bins(df, x_column, y_column, breaks_list):
            dtm = pd.DataFrame({'y': df[y_column], 'value': df[x_column]})
            bstbrks = [-np.inf] + breaks_list + [np.inf]
            labels = ['[{},{})'.format(bstbrks[i], bstbrks[i + 1]) for i in range(len(bstbrks) - 1)]
            dtm.loc[:, 'bin'] = pd.cut(dtm['value'], bstbrks, right=False, labels=labels)
            dtm['bin'] = dtm['bin'].astype(str)
            bins = dtm.groupby(['bin'], group_keys=False)['y'].agg([_n0, _n1]) \
                .reset_index().rename(columns={'_n0': 'good', '_n1': 'bad'})
            bins['is_special_values'] = [False] * len(bins)
            bins["ordered"] = bins['bin'].apply(_get_bin_left_value)
            bins = bins.sort_values(by=["ordered"], ascending=[True])
            return bins

        def _get_badprob(bins):
            bins['count'] = bins['good'] + bins['bad']
            bins['badprob'] = bins['bad'] / bins['count']
            bad_prob = bins[bins['is_special_values'] == False]['badprob'].values.tolist()
            return bad_prob

        def _calculation_iv(bins, judge_monto=True, pos_neg_cnt=1):
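            # Return the total IV of the binning, or -1 when monotonicity is
            # required but violated; special-value bins are skipped by the
            # monotonicity check but still counted in the IV sum.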
            # Monotonicity check on the non-special bins
            bad_prob = _get_badprob(bins)
            if judge_monto and not f_judge_monto(bad_prob, pos_neg_cnt):
                return -1
            # IV calculation (zero counts are replaced with 0.9 to avoid log(0))
            infovalue = pd.DataFrame({'good': bins['good'], 'bad': bins['bad']}) \
                .replace(0, 0.9) \
                .assign(
                    DistrBad=lambda x: x.bad / sum(x.bad),
                    DistrGood=lambda x: x.good / sum(x.good)
                ) \
                .assign(iv=lambda x: (x.DistrBad - x.DistrGood) * np.log(x.DistrBad / x.DistrGood)) \
                .iv
            bins['bin_iv'] = infovalue
            bins['total_iv'] = bins['bin_iv'].sum()
            iv = bins['total_iv'].values[0]
            return iv

        def _f_sampling(distribute_list: list, sample_rate: float):
            # Down-sample the candidate splits; a fully greedy search is too slow
            sampled_list = distribute_list[::int(1 / sample_rate)]
            return sampled_list

        train_data = data.train_data
        train_data_filter = train_data[~train_data[x_column].isin(special_values)]
        train_data_filter = train_data_filter.sort_values(by=x_column, ascending=True)
        train_data_x = train_data_filter[x_column]
        train_data_x_describe = train_data_x.describe(percentiles=[0.1, 0.9])
        train_data_x_max = train_data_x.max()

        test_data = data.test_data
        test_data_filter = None
        if test_data is not None and len(test_data) != 0:
            test_data_filter = test_data[~test_data[x_column].isin(special_values)]
            test_data_filter = test_data_filter.sort_values(by=x_column, ascending=True)

        # Build candidate split points for 2 to 5 bins
        distribute_list = []
        points_list = []
        for bin_num in list(range(2, 6)):
            distribute_list_cache = _f_distribute_balls(int(1 / interval), bin_num)
            # Sample when using 4 or more bins, otherwise the search takes too long
            sample_num = 1000 * sample_rate
            if sample_rate <= 0.15:
                sample_num *= 2
            if bin_num == 4 and len(distribute_list_cache) >= sample_num:
                distribute_list_cache = _f_sampling(distribute_list_cache, sample_num / len(distribute_list_cache))
            sample_num = 4000 * sample_rate
            if bin_num == 5 and len(distribute_list_cache) >= sample_num:
                distribute_list_cache = _f_sampling(distribute_list_cache, sample_num / len(distribute_list_cache))
            distribute_list.extend(distribute_list_cache)
        for distribute in distribute_list:
            point_list_cache = []
            point_percentile_list = [sum(distribute[0:idx + 1]) * interval for idx, _ in enumerate(distribute[0:-1])]
            for point_percentile in point_percentile_list:
                point = train_data_x.iloc[int(len(train_data_x) * point_percentile)]
                point = float(point)
                if format_bin:
                    point = f_format_bin(train_data_x_describe, point)
                point = round(point, 2)
                if point == 0:
                    continue
                if point not in point_list_cache and point < train_data_x_max:
                    point_list_cache.append(point)
            if point_list_cache not in points_list and len(point_list_cache) != 0:
                points_list.append(point_list_cache)

        # Filter by IV and monotonicity: find the best binning among 2-5 bins
        bins_enum = {}
        iv_max = 0
        breaks_list_target = None
        judge_monto = True
        if len(breaks_list) != 0:
            # User-specified breaks: evaluate them as-is and skip the monotonicity check
            points_list = [breaks_list]
            judge_monto = False
        train_sv_bin_list = _get_sv_bins(train_data, x_column, y_column, special_values)
        test_sv_bin_list = None
        if test_data_filter is not None:
            test_sv_bin_list = _get_sv_bins(test_data, x_column, y_column, special_values)
        for point_list in points_list:
            is_discard = 0
            discard_reason = ""
            is_monto = 1
            is_monto_contrast = 1
            train_bins = _get_bins(train_data_filter, x_column, y_column, point_list)
            # Merge in the special-value bins before computing IV
            for sv_bin in train_sv_bin_list:
                train_bins = pd.concat((train_bins, sv_bin))
            # _calculation_iv includes the monotonicity check and excludes special values
            train_iv = _calculation_iv(train_bins, judge_monto, pos_neg_cnt)
            # Only the training set is constrained on monotonicity and IV size
            if train_iv < iv_threshold:
                discard_reason = f"训练集iv小于阈值{iv_threshold}"
                is_discard = 1
                is_monto = 0
            test_iv = 0
            if test_data_filter is not None:
                test_bins = _get_bins(test_data_filter, x_column, y_column, point_list)
                for sv_bin in test_sv_bin_list:
                    test_bins = pd.concat((test_bins, sv_bin))
                test_iv = _calculation_iv(test_bins, judge_monto, pos_neg_cnt)
                # Trend consistency check between train and test
                train_bad_prob = _get_badprob(train_bins)
                test_bad_prob = _get_badprob(test_bins)
                if not f_monto_contrast(train_bad_prob, test_bad_prob, monto_contrast_change_cnt) \
                        and len(breaks_list) == 0:
                    discard_reason = "变量趋势一致性不够"
                    is_discard = 1
                    is_monto_contrast = 0
            iv = train_iv + test_iv
            if len(breaks_list) == 0:
                bin_num = len(point_list) + 1
                if bin_num not in bins_enum.keys():
                    bins_enum[bin_num] = []
                bins_enum[bin_num].append({
                    "is_discard": is_discard,
                    "is_monto": is_monto,
                    "is_monto_contrast": is_monto_contrast,
                    "discard_reason": discard_reason,
                    "point_list": point_list,
                    "iv": iv,
                })
            if iv > iv_max and not is_discard:
                iv_max = iv
                breaks_list_target = point_list

        # Best split points under each bin count
        bins_enum_best_point = []
        for k, v in bins_enum.items():
            df_bin_enum = pd.DataFrame(data=v)
            df_bin_enum.sort_values(by=["is_discard", "is_monto", "is_monto_contrast", "iv"],
                                    ascending=[True, False, False, False], inplace=True)
            bins_enum_best_point.append(df_bin_enum.iloc[0]["point_list"])
        return iv_max, breaks_list_target, bins_enum_best_point

    def filter(self, data: DataSplitEntity, *args, **kwargs) -> Tuple[
        Dict[str, CandidateFeatureEntity], Dict[str, List[CandidateFeatureEntity]]]:
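        # Full pipeline: coarse IV screen, per-variable best-binning search for
        # numeric columns, correlation pruning, then keep the top candidate_num
        # variables by IV.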
        # Coarse screening
        bins_iv_dict = self._f_wide_filter(data)
        x_columns_candidate = list(bins_iv_dict.keys())
        candidate_num = self.data_process_config.candidate_num
        candidate_dict: Dict[str, CandidateFeatureEntity] = {}
        numeric_candidate_dict_all: Dict[str, List[CandidateFeatureEntity]] = {}
        for x_column in tqdm(x_columns_candidate):
            if is_numeric_dtype(data.train_data[x_column]):
                iv_max, breaks_list, bins_enum_best_point = self._f_get_best_bins_numeric(data, x_column)
                if len(bins_enum_best_point) != 0:
                    numeric_candidate_dict_all[x_column] = []
                    for point in bins_enum_best_point:
                        numeric_candidate_dict_all[x_column].append(CandidateFeatureEntity(x_column, point, 0))
                if breaks_list is None:
                    continue
                candidate_dict[x_column] = CandidateFeatureEntity(x_column, breaks_list, iv_max)
            else:
                # Categorical columns are handled by scorecardpy for now
                candidate_dict[x_column] = CandidateFeatureEntity(x_column, bins_iv_dict[x_column]["breaks_list"],
                                                                  bins_iv_dict[x_column]["iv_max"])
        # Prune further by correlation
        x_columns_candidate = self._f_corr_filter(data, candidate_dict)
        candidate_list: List[CandidateFeatureEntity] = []
        for x_column, v in candidate_dict.items():
            if x_column in x_columns_candidate:
                candidate_list.append(v)
        candidate_list.sort(key=lambda x: x.iv_max, reverse=True)
        candidate_list = candidate_list[0:candidate_num]
        candidate_dict = {}
        for candidate in candidate_list:
            candidate_dict[candidate.x_column] = candidate
        return candidate_dict, numeric_candidate_dict_all

    def feature_generate(self, data: DataSplitEntity, candidate_dict: Dict[str, CandidateFeatureEntity], *args,
                         **kwargs) -> DataPreparedEntity:
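        # Apply the selected binnings as WOE transforms to the train/val/test
        # splits and wrap them into feature entities.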
        train_data = data.train_data
        val_data = data.val_data
        test_data = data.test_data
        y_column = self.data_process_config.y_column
        x_columns_candidate = list(candidate_dict.keys())
        bins = self._f_get_bins_by_breaks(train_data, candidate_dict)
        train_woe = sc.woebin_ply(train_data[x_columns_candidate], bins, print_info=False)
        train_data_feature = DataFeatureEntity(pd.concat((train_woe, train_data[y_column]), axis=1),
                                               train_woe.columns.tolist(), y_column)
        val_data_feature = None
        if val_data is not None and len(val_data) != 0:
            val_woe = sc.woebin_ply(val_data[x_columns_candidate], bins, print_info=False)
            val_data_feature = DataFeatureEntity(pd.concat((val_woe, val_data[y_column]), axis=1),
                                                 val_woe.columns.tolist(), y_column)
        test_data_feature = None
        if test_data is not None and len(test_data) != 0:
            test_woe = sc.woebin_ply(test_data[x_columns_candidate], bins, print_info=False)
            test_data_feature = DataFeatureEntity(pd.concat((test_woe, test_data[y_column]), axis=1),
                                                  test_woe.columns.tolist(), y_column)
        return DataPreparedEntity(train_data_feature, val_data_feature, test_data_feature, bins=bins,
                                  data_split_original=data)

    def feature_report(self, data: DataSplitEntity, candidate_dict: Dict[str, CandidateFeatureEntity],
                       numeric_candidate_dict_all: Dict[str, List[CandidateFeatureEntity]],
                       *args, **kwargs) -> Dict[str, MetricFucEntity]:
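        # Build the report artifacts: sample distribution, per-variable IV/PSI
        # tables, WOE trend plots, correlation heatmap and VIF; optionally
        # renders everything inline in Jupyter.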
        y_column = self.data_process_config.y_column
        jupyter = self.data_process_config.jupyter
        x_columns_candidate = list(candidate_dict.keys())
        train_data = data.train_data
        test_data = data.test_data
        metric_value_dict = {}
        # Sample distribution
        metric_value_dict["样本分布"] = MetricFucEntity(table=data.get_distribution(y_column), table_font_size=10,
                                                        table_cell_width=3)
        # Variable IV and PSI
        train_bins = self._f_get_bins_by_breaks(train_data, candidate_dict)
        train_iv = self._f_get_iv_by_bins(train_bins)
        if test_data is not None and len(test_data) != 0:
            # PSI reduces to an IV computation once y is replaced by a train/test indicator
            psi_df = pd.concat((train_data, test_data))
            psi_df["#target#"] = [1] * len(train_data) + [0] * len(test_data)
            psi = self._f_get_bins_by_breaks(psi_df, candidate_dict, y_column="#target#")
            psi = self._f_get_iv_by_bins(psi)
            psi.columns = ['变量', 'psi']
            train_iv = pd.merge(train_iv, psi, on="变量", how="left")
            # Variable trends on the test set
            test_bins = self._f_get_bins_by_breaks(test_data, candidate_dict)
            image_path_list = self._f_save_var_trend(test_bins, x_columns_candidate, "test")
            metric_value_dict["变量趋势-测试集"] = MetricFucEntity(image_path=image_path_list, image_size=4)
        metric_value_dict["变量iv"] = MetricFucEntity(table=train_iv, table_font_size=10, table_cell_width=3)
        # Variable trends on the training set
        image_path_list = self._f_save_var_trend(train_bins, x_columns_candidate, "train")
        metric_value_dict["变量趋势-训练集"] = MetricFucEntity(image_path=image_path_list, image_size=4)
        # Variable validity: correlation heatmap and VIF
        train_woe = sc.woebin_ply(train_data[x_columns_candidate], train_bins, print_info=False)
        var_corr_image_path = self._f_get_var_corr_image(train_woe)
        vif_df = f_get_ivf(train_woe)
        metric_value_dict["变量有效性"] = MetricFucEntity(image_path=var_corr_image_path, table=vif_df)
        if jupyter:
            from IPython import display
            display.display(metric_value_dict["样本分布"].table)
            # Show variable IV
            display.display(metric_value_dict["变量iv"].table)
            # Show VIF
            display.display(metric_value_dict["变量有效性"].table)
            # Show the variable correlation heatmap
            f_display_images_by_side(metric_value_dict["变量有效性"].image_path, display, width=800)
            # Show variable trends
            var_trend_train = metric_value_dict["变量趋势-训练集"].image_path
            var_trend_test = None
            metric_test = metric_value_dict.get("变量趋势-测试集")
            if metric_test is not None:
                var_trend_test = metric_test.image_path
            f_display_images_by_side(var_trend_train, display, title="变量趋势训练集", image_path_list2=var_trend_test,
                                     title2="变量趋势测试集")
            # Show the selected break points
            breaks_list = {}
            for x_column, feature in candidate_dict.items():
                breaks_list[x_column] = feature.breaks_list
            print("变量切分点:")
            print(json.dumps(breaks_list, ensure_ascii=False, indent=2, cls=NumpyEncoder))
            # Show the recommended break points for every numeric variable
            print("-----不同分箱数下变量的推荐切分点-----")
            for x_column, features in numeric_candidate_dict_all.items():
                print(f"-----【{x_column}】-----")
                print("切分点:")
                var_trend_images_train = []
                var_trend_images_test = []
                for feature in features:
                    print(json.dumps(feature.breaks_list, ensure_ascii=False, cls=NumpyEncoder))
                    var_breaks_list = [str(i) for i in feature.breaks_list]
                    var_trend_bins_train = self._f_get_bins_by_breaks(train_data, {x_column: feature})
                    image_path = self._f_save_var_trend(var_trend_bins_train, [x_column],
                                                        f"train_{x_column}_{'_'.join(var_breaks_list)}")
                    var_trend_images_train.append(image_path[0])
                    if metric_test is not None:
                        var_trend_bins_test = self._f_get_bins_by_breaks(test_data, {x_column: feature})
                        image_path = self._f_save_var_trend(var_trend_bins_test, [x_column],
                                                            f"test_{x_column}_{'_'.join(var_breaks_list)}")
                        var_trend_images_test.append(image_path[0])
                f_display_images_by_side(var_trend_images_train, display, title="训练集",
                                         image_path_list2=var_trend_images_test,
                                         title2="测试集")
        return metric_value_dict
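

if __name__ == "__main__":
    # Minimal self-contained sanity check of the IV formula used in
    # _calculation_iv above. The bin counts are made up for illustration and are
    # not part of the original pipeline; because this module uses relative
    # imports, the block only executes when the module is run inside its package
    # (python -m ...).
    demo_bins = pd.DataFrame({'good': [80, 60, 40], 'bad': [5, 15, 30]})
    distr_bad = demo_bins['bad'] / demo_bins['bad'].sum()
    distr_good = demo_bins['good'] / demo_bins['good'].sum()
    iv_demo = float(((distr_bad - distr_good) * np.log(distr_bad / distr_good)).sum())
    print(f"demo IV: {iv_demo:.4f}")  # larger IV = stronger good/bad separation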