-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathmain_Agglomerative.py
More file actions
170 lines (149 loc) · 5.64 KB
/
main_Agglomerative.py
File metadata and controls
170 lines (149 loc) · 5.64 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import AgglomerativeClustering
from sklearn.metrics import silhouette_score, calinski_harabasz_score, davies_bouldin_score
from sklearn.preprocessing import StandardScaler
from AgglomerativeClustering_diy import AgglomerativeClustering_diy
from data import data_loader
from data import evaluation
train_df = data_loader().data1

# Grid-search sklearn's AgglomerativeClustering over linkage method, distance
# metric and cluster count, and plot three internal cluster-quality indices
# (silhouette, Calinski-Harabasz, Davies-Bouldin) for each linkage/metric pair.
n_clusters_range = range(2, 10)  # candidate numbers of clusters
linkage_methods = ['ward', 'complete', 'average']
affinity_methods = ['euclidean', 'manhattan', 'cosine']

for linkage_method in linkage_methods:
    for affinity_method in affinity_methods:
        if linkage_method == 'ward' and affinity_method != 'euclidean':
            # Ward linkage only supports euclidean distance
            continue

        # Track which n_clusters values actually produced a valid clustering,
        # so the x-axis matches the scores even when some values are skipped.
        valid_n_clusters = []
        silhouette_scores = []
        calinski_harabasz_scores = []
        davies_bouldin_scores = []

        for n_clusters in n_clusters_range:
            # Fit Agglomerative Clustering with the current parameter combo.
            agg_cluster = AgglomerativeClustering(
                n_clusters=n_clusters, linkage=linkage_method, metric=affinity_method
            )
            clusters = agg_cluster.fit_predict(train_df)

            # Skip combinations that failed to form the requested number of
            # clusters — the scores below would be computed on a degenerate
            # labelling and mislead the comparison.
            unique_labels = np.unique(clusters)
            if len(unique_labels) < n_clusters:
                continue

            # Compute the three internal evaluation metrics.
            valid_n_clusters.append(n_clusters)
            silhouette_scores.append(silhouette_score(train_df, clusters))
            calinski_harabasz_scores.append(calinski_harabasz_score(train_df, clusters))
            davies_bouldin_scores.append(davies_bouldin_score(train_df, clusters))

        if not valid_n_clusters:
            # Every n_clusters value was skipped — nothing to plot.
            continue

        # BUGFIX: the original sliced a *prefix* of n_clusters_range by the
        # number of collected scores, which mislabels the x-axis whenever a
        # middle value was skipped; plot against the recorded valid counts.
        plt.figure(figsize=(12, 4))

        # Silhouette Score (higher is better)
        plt.subplot(1, 3, 1)
        plt.plot(valid_n_clusters, silhouette_scores, marker='o')
        plt.title(f'Silhouette Score ({linkage_method}, {affinity_method})')
        plt.xlabel('Number of Clusters')
        plt.ylabel('Score')

        # Calinski-Harabasz Score (higher is better)
        plt.subplot(1, 3, 2)
        plt.plot(valid_n_clusters, calinski_harabasz_scores, marker='o')
        plt.title(f'Calinski-Harabasz Score ({linkage_method}, {affinity_method})')
        plt.xlabel('Number of Clusters')
        plt.ylabel('Score')

        # Davies-Bouldin Score (lower is better)
        plt.subplot(1, 3, 3)
        plt.plot(valid_n_clusters, davies_bouldin_scores, marker='o')
        plt.title(f'Davies-Bouldin Score ({linkage_method}, {affinity_method})')
        plt.xlabel('Number of Clusters')
        plt.ylabel('Score')

        plt.tight_layout()
        plt.show()
'''
结果可视化部分
'''
# NOTE(review): everything below is disabled reference code kept for
# comparison — first a one-shot sklearn AgglomerativeClustering run at
# n_clusters=3 passed to the project's evaluation() helper, then the same
# metric sweep using the hand-written AgglomerativeClustering_diy class.
# agg_cluster = AgglomerativeClustering(
# n_clusters=3, linkage=linkage_method, metric=affinity_method
# )
#
# clusters = agg_cluster.fit_predict(train_df)
# evaluation(data_loader().data1, clusters, data_loader().country, agg_cluster)
'''
实现了手写版AgglomerativeClustering
'''
# # Load the data
# data = pd.read_csv('Country-data.csv')
#
# # Extract the feature columns
# features = data.drop('country', axis=1)
#
# # Standardise the features
# scaler = StandardScaler()
# features_scaled = scaler.fit_transform(features)
#
# # Compute evaluation metrics
# n_clusters_range = range(2, 10)  # candidate numbers of clusters
#
# silhouette_scores = []
# calinski_harabasz_scores = []
# davies_bouldin_scores = []
#
# for n_clusters in n_clusters_range:
#     # Run the hand-written Agglomerative Clustering
#     agg_cluster = AgglomerativeClustering_diy(n_clusters=n_clusters)
#     clusters = agg_cluster.fit_predict(features_scaled)
#
#     # Compute evaluation metrics
#     silhouette_avg = silhouette_score(features_scaled, clusters)
#     calinski_harabasz_score_val = calinski_harabasz_score(features_scaled, clusters)
#     davies_bouldin_score_val = davies_bouldin_score(features_scaled, clusters)
#
#     silhouette_scores.append(silhouette_avg)
#     calinski_harabasz_scores.append(calinski_harabasz_score_val)
#     davies_bouldin_scores.append(davies_bouldin_score_val)
#
# # Visualisation
# plt.figure(figsize=(12, 4))
#
# # Silhouette Score
# plt.subplot(1, 3, 1)
# plt.plot(n_clusters_range, silhouette_scores, marker='o')
# plt.title('Silhouette Score')
# plt.xlabel('Number of Clusters')
# plt.ylabel('Score')
#
# # Calinski-Harabasz Score
# plt.subplot(1, 3, 2)
# plt.plot(n_clusters_range, calinski_harabasz_scores, marker='o')
# plt.title('Calinski-Harabasz Score')
# plt.xlabel('Number of Clusters')
# plt.ylabel('Score')
#
# # Davies-Bouldin Score
# plt.subplot(1, 3, 3)
# plt.plot(n_clusters_range, davies_bouldin_scores, marker='o')
# plt.title('Davies-Bouldin Score')
# plt.xlabel('Number of Clusters')
# plt.ylabel('Score')
#
# plt.tight_layout()
# plt.show()
# agg_cluster = AgglomerativeClustering(
# n_clusters=3
# )
#
# clusters = agg_cluster.fit_predict(train_df)
# evaluation(data_loader().data1, clusters, data_loader().country, agg_cluster)