from sklearn.model_selection import train_test_split
from sklearn.datasets import load_breast_cancer
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import accuracy_score
import pandas as pd
import numpy as np
import warnings
# Suppress library warnings (e.g., convergence warnings from LogisticRegression on unscaled data)
warnings.filterwarnings(action='ignore')
cancer = load_breast_cancer()
cancer_df = pd.DataFrame(cancer.data, columns=cancer.feature_names)
cancer_df['target'] = cancer.target
cancer_df
| | mean radius | mean texture | mean perimeter | mean area | mean smoothness | mean compactness | mean concavity | mean concave points | mean symmetry | mean fractal dimension | ... | worst texture | worst perimeter | worst area | worst smoothness | worst compactness | worst concavity | worst concave points | worst symmetry | worst fractal dimension | target |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
0 | 17.99 | 10.38 | 122.80 | 1001.0 | 0.11840 | 0.27760 | 0.30010 | 0.14710 | 0.2419 | 0.07871 | ... | 17.33 | 184.60 | 2019.0 | 0.16220 | 0.66560 | 0.7119 | 0.2654 | 0.4601 | 0.11890 | 0 |
1 | 20.57 | 17.77 | 132.90 | 1326.0 | 0.08474 | 0.07864 | 0.08690 | 0.07017 | 0.1812 | 0.05667 | ... | 23.41 | 158.80 | 1956.0 | 0.12380 | 0.18660 | 0.2416 | 0.1860 | 0.2750 | 0.08902 | 0 |
2 | 19.69 | 21.25 | 130.00 | 1203.0 | 0.10960 | 0.15990 | 0.19740 | 0.12790 | 0.2069 | 0.05999 | ... | 25.53 | 152.50 | 1709.0 | 0.14440 | 0.42450 | 0.4504 | 0.2430 | 0.3613 | 0.08758 | 0 |
3 | 11.42 | 20.38 | 77.58 | 386.1 | 0.14250 | 0.28390 | 0.24140 | 0.10520 | 0.2597 | 0.09744 | ... | 26.50 | 98.87 | 567.7 | 0.20980 | 0.86630 | 0.6869 | 0.2575 | 0.6638 | 0.17300 | 0 |
4 | 20.29 | 14.34 | 135.10 | 1297.0 | 0.10030 | 0.13280 | 0.19800 | 0.10430 | 0.1809 | 0.05883 | ... | 16.67 | 152.20 | 1575.0 | 0.13740 | 0.20500 | 0.4000 | 0.1625 | 0.2364 | 0.07678 | 0 |
... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... | ... |
564 | 21.56 | 22.39 | 142.00 | 1479.0 | 0.11100 | 0.11590 | 0.24390 | 0.13890 | 0.1726 | 0.05623 | ... | 26.40 | 166.10 | 2027.0 | 0.14100 | 0.21130 | 0.4107 | 0.2216 | 0.2060 | 0.07115 | 0 |
565 | 20.13 | 28.25 | 131.20 | 1261.0 | 0.09780 | 0.10340 | 0.14400 | 0.09791 | 0.1752 | 0.05533 | ... | 38.25 | 155.00 | 1731.0 | 0.11660 | 0.19220 | 0.3215 | 0.1628 | 0.2572 | 0.06637 | 0 |
566 | 16.60 | 28.08 | 108.30 | 858.1 | 0.08455 | 0.10230 | 0.09251 | 0.05302 | 0.1590 | 0.05648 | ... | 34.12 | 126.70 | 1124.0 | 0.11390 | 0.30940 | 0.3403 | 0.1418 | 0.2218 | 0.07820 | 0 |
567 | 20.60 | 29.33 | 140.10 | 1265.0 | 0.11780 | 0.27700 | 0.35140 | 0.15200 | 0.2397 | 0.07016 | ... | 39.42 | 184.60 | 1821.0 | 0.16500 | 0.86810 | 0.9387 | 0.2650 | 0.4087 | 0.12400 | 0 |
568 | 7.76 | 24.54 | 47.92 | 181.0 | 0.05263 | 0.04362 | 0.00000 | 0.00000 | 0.1587 | 0.05884 | ... | 30.37 | 59.16 | 268.6 | 0.08996 | 0.06444 | 0.0000 | 0.0000 | 0.2871 | 0.07039 | 1 |
569 rows × 31 columns
# Prepare the data.
X = cancer_df.drop(['target'], axis=1)
y = cancer_df['target']
X.shape, y.shape
((569, 30), (569,))
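As a quick sanity check before splitting, the class balance can be inspected from the y series defined above (in this dataset, target 0 is malignant and 1 is benign):

# Class distribution of the target (0 = malignant, 1 = benign)
print(y.value_counts())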
# Initialize the base models.
rf_model = RandomForestClassifier()
lr_model = LogisticRegression()
knn_model = KNeighborsClassifier()
# Split the data into training and test sets.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2,
random_state=42)
# Split the training set further into training and validation sets.
X_tr, X_val, y_tr, y_val = train_test_split(X_train, y_train,
test_size=0.2, random_state=42)
# Train the base models and store their predictions on the validation set.
rf_model.fit(X_tr, y_tr)
rf_pred_val = rf_model.predict(X_val)
lr_model.fit(X_tr, y_tr)
lr_pred_val = lr_model.predict(X_val)
knn_model.fit(X_tr, y_tr)
knn_pred_val = knn_model.predict(X_val)
# Train the meta model on the base models' validation predictions.
meta_features_pred_val = [rf_pred_val, lr_pred_val, knn_pred_val]
meta_X = np.array(meta_features_pred_val).T
meta_X[0:3]
array([[0, 0, 0], [1, 1, 1], [1, 1, 1]])
meta_y = y_val
print(type(meta_X), type(meta_y))
meta_X.shape, meta_y.shape
<class 'numpy.ndarray'> <class 'pandas.core.series.Series'>
((91, 3), (91,))
meta_model = RandomForestClassifier()
meta_model.fit(meta_X, meta_y)
RandomForestClassifier()
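Note that the meta model above is fit on only the 91 validation-set predictions. A common refinement of stacking builds the meta-features from out-of-fold predictions over the entire training set instead; a minimal sketch (not used in the results below) based on cross_val_predict:

from sklearn.model_selection import cross_val_predict

# Out-of-fold predictions of each base model over the full training set
oof_rf = cross_val_predict(RandomForestClassifier(), X_train, y_train, cv=5)
oof_lr = cross_val_predict(LogisticRegression(), X_train, y_train, cv=5)
oof_knn = cross_val_predict(KNeighborsClassifier(), X_train, y_train, cv=5)

# Column-stack the predictions into meta-features of shape (len(X_train), 3)
oof_meta_X = np.column_stack([oof_rf, oof_lr, oof_knn])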
rf_pred_test = rf_model.predict(X_test)
lr_pred_test = lr_model.predict(X_test)
knn_pred_test = knn_model.predict(X_test)
# Build the meta-features for the test data from the base-model predictions.
test_meta_features = [rf_pred_test, lr_pred_test, knn_pred_test]
test_meta_X = np.array(test_meta_features).T
# Use the meta model to generate the final predictions for the test data.
final_pred = meta_model.predict(test_meta_X)
# Evaluate the accuracy of the final predictions.
accuracy = accuracy_score(y_test, final_pred)
print("Accuracy:", accuracy)
Accuracy: 0.9649122807017544
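For comparison, scikit-learn ships the same idea out of the box as StackingClassifier, which handles the cross-validated meta-feature construction internally. By default it stacks predicted probabilities rather than class labels, so its accuracy will not exactly match the manual version above; a minimal sketch:

from sklearn.ensemble import StackingClassifier

# Stacking ensemble with the same base models and a random forest as meta model
stacking_model = StackingClassifier(
    estimators=[('rf', RandomForestClassifier()),
                ('lr', LogisticRegression()),
                ('knn', KNeighborsClassifier())],
    final_estimator=RandomForestClassifier(),
    cv=5)
stacking_model.fit(X_train, y_train)
print("StackingClassifier accuracy:", stacking_model.score(X_test, y_test))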