@@ -31,16 +31,27 @@ import (
3131 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
3232 "k8s.io/apimachinery/pkg/labels"
3333 "k8s.io/apimachinery/pkg/runtime"
34+ "k8s.io/apimachinery/pkg/types"
3435 "k8s.io/utils/ptr"
3536 "sigs.k8s.io/controller-runtime/pkg/client"
3637)
3738
38- func getTestGooseFSEngineNode (client client.Client , name string , namespace string , withRunTime bool ) * GooseFSEngine {
// Shared fixture constants for the cache-node scheduling tests: the test
// namespace, the worker StatefulSet's type metadata, the label values used
// to select GooseFS worker pods, and the label-selector format string.
const (
	testNodeNamespace     = "big-data"
	testNodeAPIVersion    = "apps/v1"
	testNodeKindSts       = "StatefulSet"
	testNodeLabelApp      = "goosefs"
	testNodeLabelRole     = "goosefs-worker"
	testNodeLabelDataset  = "fluid.io/dataset"
	testNodeLabelSelector = "%s=true"
)
48+
49+ func getTestGooseFSEngineNode (c client.Client , name string , namespace string , withRunTime bool ) * GooseFSEngine {
3950 engine := & GooseFSEngine {
4051 runtime : nil ,
4152 name : name ,
4253 namespace : namespace ,
43- Client : client ,
54+ Client : c ,
4455 runtimeInfo : nil ,
4556 Log : fake .NullLogger (),
4657 }
@@ -53,10 +64,8 @@ func getTestGooseFSEngineNode(client client.Client, name string, namespace strin
5364
5465func TestSyncScheduleInfoToCacheNodes (t * testing.T ) {
5566 type fields struct {
56- // runtime *datav1alpha1.GooseFSRuntime
5767 worker * appsv1.StatefulSet
5868 pods []* v1.Pod
59- ds * appsv1.DaemonSet
6069 nodes []* v1.Node
6170 name string
6271 namespace string
@@ -65,229 +74,158 @@ func TestSyncScheduleInfoToCacheNodes(t *testing.T) {
6574 name string
6675 fields fields
6776 nodeNames []string
68- }{
69- {
70- name : "create" ,
71- fields : fields {
72- name : "spark" ,
73- namespace : "big-data" ,
74- worker : & appsv1.StatefulSet {
75- TypeMeta : metav1.TypeMeta {
76- Kind : "StatefulSet" ,
77- APIVersion : "apps/v1" ,
78- },
79- ObjectMeta : metav1.ObjectMeta {
80- Name : "spark-worker" ,
81- Namespace : "big-data" ,
82- UID : "uid1" ,
83- },
84- Spec : appsv1.StatefulSetSpec {
85- Selector : & metav1.LabelSelector {
86- MatchLabels : map [string ]string {
87- "app" : "goosefs" ,
88- "role" : "goosefs-worker" ,
89- "release" : "spark" ,
90- },
91- },
92- },
77+ }{}
78+
79+ testcaseCnt := 0
80+ makeDatasetResourcesFn := func (dsName string , dsNamespace string , stsPodNodeNames []string ) fields {
81+ testcaseCnt ++
82+ ret := fields {
83+ name : dsName ,
84+ namespace : dsNamespace ,
85+ worker : & appsv1.StatefulSet {
86+ TypeMeta : metav1.TypeMeta {
87+ Kind : testNodeKindSts ,
88+ APIVersion : testNodeAPIVersion ,
9389 },
94- pods : []* v1.Pod {
95- {
96- ObjectMeta : metav1.ObjectMeta {
97- Name : "spark-worker-0" ,
98- Namespace : "big-data" ,
99- OwnerReferences : []metav1.OwnerReference {{
100- Kind : "StatefulSet" ,
101- APIVersion : "apps/v1" ,
102- Name : "spark-worker" ,
103- UID : "uid1" ,
104- Controller : ptr .To (true ),
105- }},
106- Labels : map [string ]string {
107- "app" : "goosefs" ,
108- "role" : "goosefs-worker" ,
109- "release" : "spark" ,
110- "fluid.io/dataset" : "big-data-spark" ,
111- },
112- },
113- Spec : v1.PodSpec {
114- NodeName : "node1" ,
115- },
116- },
90+ ObjectMeta : metav1.ObjectMeta {
91+ Name : dsName + "-worker" ,
92+ Namespace : dsNamespace ,
93+ UID : types .UID (fmt .Sprintf ("uid%d" , testcaseCnt )),
11794 },
118- nodes : []* v1.Node {
119- {
120- ObjectMeta : metav1.ObjectMeta {
121- Name : "node1" ,
95+ Spec : appsv1.StatefulSetSpec {
96+ Selector : & metav1.LabelSelector {
97+ MatchLabels : map [string ]string {
98+ "app" : testNodeLabelApp ,
99+ "role" : testNodeLabelRole ,
100+ "release" : dsName ,
122101 },
123102 },
124103 },
125104 },
126- nodeNames : []string {"node1" },
127- }, {
128- name : "add" ,
129- fields : fields {
130- name : "hbase" ,
131- namespace : "big-data" ,
132- worker : & appsv1.StatefulSet {
133- TypeMeta : metav1.TypeMeta {
134- Kind : "StatefulSet" ,
135- APIVersion : "apps/v1" ,
136- },
137- ObjectMeta : metav1.ObjectMeta {
138- Name : "hbase-worker" ,
139- Namespace : "big-data" ,
140- UID : "uid2" ,
141- },
142- Spec : appsv1.StatefulSetSpec {
143- Selector : & metav1.LabelSelector {
144- MatchLabels : map [string ]string {
145- "app" : "goosefs" ,
146- "role" : "goosefs-worker" ,
147- "release" : "hbase" ,
148- },
149- },
150- },
151- },
152- pods : []* v1.Pod {
153- {
154- ObjectMeta : metav1.ObjectMeta {
155- Name : "hbase-worker-0" ,
156- Namespace : "big-data" ,
157- OwnerReferences : []metav1.OwnerReference {{
158- Kind : "StatefulSet" ,
159- APIVersion : "apps/v1" ,
160- Name : "hbase-worker" ,
161- UID : "uid2" ,
162- Controller : ptr .To (true ),
163- }},
164- Labels : map [string ]string {
165- "app" : "goosefs" ,
166- "role" : "goosefs-worker" ,
167- "release" : "hbase" ,
168- "fluid.io/dataset" : "big-data-hbase" ,
169- },
170- },
171- Spec : v1.PodSpec {
172- NodeName : "node3" ,
173- },
174- },
175- },
176- nodes : []* v1.Node {
177- {
178- ObjectMeta : metav1.ObjectMeta {
179- Name : "node3" ,
180- },
181- }, {
182- ObjectMeta : metav1.ObjectMeta {
183- Name : "node2" ,
184- Labels : map [string ]string {
185- "fluid.io/s-default-hbase" : "true" ,
186- },
187- },
188- },
189- },
190- },
191- nodeNames : []string {"node3" },
192- }, {
193- name : "noController" ,
194- fields : fields {
195- name : "hbase-a" ,
196- namespace : "big-data" ,
197- worker : & appsv1.StatefulSet {
198- TypeMeta : metav1.TypeMeta {
199- Kind : "StatefulSet" ,
200- APIVersion : "apps/v1" ,
201- },
202- ObjectMeta : metav1.ObjectMeta {
203- Name : "hbase-a-worker" ,
204- Namespace : "big-data" ,
205- UID : "uid3" ,
206- },
207- Spec : appsv1.StatefulSetSpec {
208- Selector : & metav1.LabelSelector {
209- MatchLabels : map [string ]string {
210- "app" : "goosefs" ,
211- "role" : "goosefs-worker" ,
212- "release" : "hbase-a" ,
213- },
214- },
215- },
216- },
217- pods : []* v1.Pod {
218- {
219- ObjectMeta : metav1.ObjectMeta {
220- Name : "hbase-a-worker-0" ,
221- Namespace : "big-data" ,
222- Labels : map [string ]string {
223- "app" : "goosefs" ,
224- "role" : "goosefs-worker" ,
225- "release" : "hbase-a" ,
226- "fluid.io/dataset" : "big-data-hbase-a" ,
227- },
228- },
229- Spec : v1.PodSpec {
230- NodeName : "node5" ,
231- },
105+ pods : []* v1.Pod {},
106+ }
107+
108+ for idx , nodeName := range stsPodNodeNames {
109+ ret .pods = append (ret .pods , & v1.Pod {
110+ ObjectMeta : metav1.ObjectMeta {
111+ Name : fmt .Sprintf ("%s-worker-%d" , dsName , idx ),
112+ Namespace : dsNamespace ,
113+ OwnerReferences : []metav1.OwnerReference {{
114+ Kind : testNodeKindSts ,
115+ APIVersion : testNodeAPIVersion ,
116+ Name : dsName + "-worker" ,
117+ UID : types .UID (fmt .Sprintf ("uid%d" , testcaseCnt )),
118+ Controller : ptr .To (true ),
119+ }},
120+ Labels : map [string ]string {
121+ "app" : testNodeLabelApp ,
122+ "role" : testNodeLabelRole ,
123+ "release" : dsName ,
124+ testNodeLabelDataset : fmt .Sprintf ("%s-%s" , dsNamespace , dsName ),
232125 },
233126 },
234- nodes : []* v1.Node {
235- {
236- ObjectMeta : metav1.ObjectMeta {
237- Name : "node5" ,
238- },
239- }, {
240- ObjectMeta : metav1.ObjectMeta {
241- Name : "node4" ,
242- Labels : map [string ]string {
243- "fluid.io/s-default-hbase-a" : "true" ,
244- },
245- },
246- },
127+ Spec : v1.PodSpec {
128+ NodeName : nodeName ,
247129 },
248- },
249- nodeNames : []string {},
250- },
130+ })
131+ }
132+
133+ return ret
251134 }
252135
136+ fields1 := makeDatasetResourcesFn ("spark" , testNodeNamespace , []string {"node1" })
137+ fields1 .nodes = append (fields1 .nodes , & v1.Node {ObjectMeta : metav1.ObjectMeta {Name : "node1" }})
138+ testcases = append (testcases , struct {
139+ name string
140+ fields fields
141+ nodeNames []string
142+ }{
143+ name : "create" ,
144+ fields : fields1 ,
145+ nodeNames : []string {"node1" },
146+ })
147+
148+ fields2 := makeDatasetResourcesFn ("hbase" , testNodeNamespace , []string {"node2" , "node3" })
149+ fields2 .nodes = append (fields2 .nodes ,
150+ & v1.Node {ObjectMeta : metav1.ObjectMeta {Name : "node3" }},
151+ & v1.Node {ObjectMeta : metav1.ObjectMeta {Name : "node2" , Labels : map [string ]string {"fluid.io/s-big-data-hbase" : "true" }}},
152+ )
153+ testcases = append (testcases , struct {
154+ name string
155+ fields fields
156+ nodeNames []string
157+ }{
158+ name : "add" ,
159+ fields : fields2 ,
160+ nodeNames : []string {"node2" , "node3" },
161+ })
162+
163+ fields3 := makeDatasetResourcesFn ("hbase-a" , testNodeNamespace , []string {"node4" , "node5" })
164+ fields3 .pods [1 ].OwnerReferences = []metav1.OwnerReference {}
165+ fields3 .nodes = append (fields3 .nodes ,
166+ & v1.Node {ObjectMeta : metav1.ObjectMeta {Name : "node5" }},
167+ & v1.Node {ObjectMeta : metav1.ObjectMeta {Name : "node4" , Labels : map [string ]string {"fluid.io/s-big-data-hbase-a" : "true" }}},
168+ )
169+ testcases = append (testcases , struct {
170+ name string
171+ fields fields
172+ nodeNames []string
173+ }{
174+ name : "noController" ,
175+ fields : fields3 ,
176+ nodeNames : []string {"node4" },
177+ })
178+
179+ fields4 := makeDatasetResourcesFn ("hbase-b" , testNodeNamespace , []string {})
180+ fields4 .nodes = append (fields4 .nodes ,
181+ & v1.Node {ObjectMeta : metav1.ObjectMeta {Name : "node6" , Labels : map [string ]string {"fluid.io/s-big-data-hbase-b" : "true" , "fluid.io/s-goosefs-big-data-hbase-b" : "true" }}},
182+ )
183+ testcases = append (testcases , struct {
184+ name string
185+ fields fields
186+ nodeNames []string
187+ }{
188+ name : "remove" ,
189+ fields : fields4 ,
190+ nodeNames : []string {},
191+ })
192+
253193 runtimeObjs := []runtime.Object {}
254194
255195 for _ , testcase := range testcases {
256196 runtimeObjs = append (runtimeObjs , testcase .fields .worker )
257197
258- if testcase .fields .ds != nil {
259- runtimeObjs = append (runtimeObjs , testcase .fields .ds )
260- }
261198 for _ , pod := range testcase .fields .pods {
262199 runtimeObjs = append (runtimeObjs , pod )
263200 }
264201
265202 for _ , node := range testcase .fields .nodes {
266203 runtimeObjs = append (runtimeObjs , node )
267204 }
268- // runtimeObjs = append(runtimeObjs, testcase.fields.pods)
269205 }
270206 c := fake .NewFakeClientWithScheme (testScheme , runtimeObjs ... )
271207
272208 for _ , testcase := range testcases {
273209 engine := getTestGooseFSEngineNode (c , testcase .fields .name , testcase .fields .namespace , true )
274210 err := engine .SyncScheduleInfoToCacheNodes ()
275211 if err != nil {
276- t .Errorf ("Got error %t." , err )
212+ t .Errorf ("testcase %s: Got error %v" , testcase .name , err )
213+ continue
277214 }
278215
279216 nodeList := & v1.NodeList {}
280- datasetLabels , err := labels .Parse (fmt .Sprintf ("%s=true" , engine .runtimeInfo .GetCommonLabelName ()))
281- if err != nil {
282- return
217+ datasetLabels , parseErr := labels .Parse (fmt .Sprintf (testNodeLabelSelector , engine .runtimeInfo .GetCommonLabelName ()))
218+ if parseErr != nil {
219+ t .Fatalf ("testcase %s: Got error parsing labels: %v" , testcase .name , parseErr )
283220 }
284221
285- err = c .List (context .TODO (), nodeList , & client.ListOptions {
222+ listErr := c .List (context .TODO (), nodeList , & client.ListOptions {
286223 LabelSelector : datasetLabels ,
287224 })
288225
289- if err != nil {
290- t .Errorf ("Got error %t." , err )
226+ if listErr != nil {
227+ t .Errorf ("testcase %s: Got error listing nodes: %v" , testcase .name , listErr )
228+ continue
291229 }
292230
293231 nodeNames := []string {}
0 commit comments