1
1
import datetime
2
2
3
- from treeherder .model .models import Group , GroupStatus , Job , Push
3
+ from treeherder .model .models import Group , GroupStatus , Job , Push , TextLogError
4
+
5
+
6
def classify(jobs_to_classify, jobs_to_unclassify):
    """Apply intermittent-failure classifications to the given job ids.

    Args:
        jobs_to_classify: job ids to park as failure_classification_id=8
            (intermittent needs classification). Only jobs with
            result="testfailed" currently classified 1 or 6 are touched.
        jobs_to_unclassify: job ids previously parked as fc_id=8 that should
            be reverted. Jobs whose text-log errors were flagged
            ``new_failure`` are restored to 6; all others go back to 1.

    Returns:
        None. All effects are bulk UPDATEs on the Job table.
    """
    # TODO: consider job.result=(busted, exception)
    if jobs_to_classify:
        # QuerySet.update() is a no-op when the filter matches nothing, so no
        # truthiness pre-check is needed (such a check costs an extra SELECT).
        Job.objects.filter(
            id__in=jobs_to_classify,
            result="testfailed",
            failure_classification_id__in=[1, 6],
        ).update(failure_classification_id=8)

    if jobs_to_unclassify:
        # Jobs with a new_failure text-log error were fc_id=6 before being
        # parked as 8; restore 6 for them instead of resetting to 1.
        new_jobs = (
            TextLogError.objects.filter(
                job__id__in=jobs_to_unclassify,
                new_failure=True,
                job__failure_classification_id=8,
            )
            .values("job__id")
            .distinct()
        )
        jobs_to_six = [j["job__id"] for j in new_jobs]
        jobs_to_one = list(set(jobs_to_unclassify) - set(jobs_to_six))

        if jobs_to_six:
            Job.objects.filter(id__in=jobs_to_six, result="testfailed").update(
                failure_classification_id=6
            )
        if jobs_to_one:
            Job.objects.filter(id__in=jobs_to_one, result="testfailed").update(
                failure_classification_id=1
            )
4
35
5
36
6
37
def _check_and_mark_infra (current_job , job_ids , push_ids ):
@@ -42,7 +73,7 @@ def _check_and_mark_infra(current_job, job_ids, push_ids):
42
73
43
74
# ignore previous classified, we are looking for NEW extra jobs
44
75
if len ([ej for ej in extra_jobs if ej ["failure_classification_id" ] != 8 ]) == 0 :
45
- return
76
+ return [], []
46
77
47
78
# ensure 50% 'success' rate
48
79
# success here means the task ran and produced groups | is success
@@ -52,20 +83,24 @@ def _check_and_mark_infra(current_job, job_ids, push_ids):
52
83
if job ["id" ] not in job_ids and job ["result" ] != "success" :
53
84
extra_failed .append (job )
54
85
86
+ jobs_to_classify = []
87
+ jobs_to_unclassify = []
88
+
55
89
# look for failure rate > 50% and exit early
56
90
if len (extra_failed ) / len (extra_jobs ) > 0.5 :
57
91
# as failure rate > 50%, if any jobs are fc_id=8 classify as fc_id=1
58
92
for job in extra_failed :
59
93
if job ["failure_classification_id" ] == 8 :
60
- Job .objects .filter (id = job ["id" ]).update (failure_classification_id = 1 )
61
- return
94
+ jobs_to_unclassify .append (job ["id" ])
62
95
63
96
# any extra_jobs will be failures without groups (infra/timeout/etc.)
64
97
# theoretically there could be many jobs here
65
98
# mark extra_jobs as `intermittent_needs_classification`
66
99
for job in extra_failed :
67
100
if job ["failure_classification_id" ] not in [4 , 8 ]:
68
- Job .objects .filter (id = job ["id" ]).update (failure_classification_id = 8 )
101
+ jobs_to_classify .append (job ["id" ])
102
+
103
+ return jobs_to_classify , jobs_to_unclassify
69
104
70
105
71
106
def check_and_mark_intermittent (job_id ):
@@ -91,7 +126,7 @@ def check_and_mark_intermittent(job_id):
91
126
)
92
127
counter = - 1
93
128
for id in idlist :
94
- if id == current_job .push .id :
129
+ if id [ "id" ] == current_job .push .id :
95
130
counter = 0
96
131
continue
97
132
if counter < 0 :
@@ -100,7 +135,7 @@ def check_and_mark_intermittent(job_id):
100
135
break
101
136
elif current_job .repository .id != 77 and counter >= 3 :
102
137
break
103
- ids .append (id )
138
+ ids .append (id [ "id" ] )
104
139
counter += 1
105
140
106
141
all_groups = (
@@ -135,7 +170,8 @@ def check_and_mark_intermittent(job_id):
135
170
# If no groups, look for infra
136
171
distinct_job_ids = list (set ([f ["job_logs__job__id" ] for f in all_groups ]))
137
172
if len (distinct_job_ids ) == 1 :
138
- return _check_and_mark_infra (current_job , distinct_job_ids , ids )
173
+ to_classify , to_unclassify = _check_and_mark_infra (current_job , distinct_job_ids , ids )
174
+ return classify (to_classify , to_unclassify )
139
175
140
176
mappings = {}
141
177
job_classifications = {}
@@ -151,6 +187,7 @@ def check_and_mark_intermittent(job_id):
151
187
# we have a variant
152
188
continue
153
189
190
+ # TODO: consider storing a list of job.id that are fc_id=8
154
191
# store job:fc_id so we can reference what needs changed
155
192
if item ["job_logs__job__id" ] not in job_classifications :
156
193
job_classifications [item ["job_logs__job__id" ]] = item [
@@ -181,18 +218,14 @@ def check_and_mark_intermittent(job_id):
181
218
current_changed_groups = {}
182
219
for group in mappings .get (current_job .push .id , {}).get ("groups" , []):
183
220
all_data = []
184
- current_data = []
221
+ current_data = [
222
+ mappings [current_job .push .id ]["groups" ][group ][j ]
223
+ for j in mappings [current_job .push .id ]["groups" ][group ]
224
+ ]
185
225
for id in mappings .keys ():
186
226
all_data .extend (
187
227
[mappings [id ]["groups" ][group ][j ] for j in mappings [id ]["groups" ].get (group , {})]
188
228
)
189
- if id == current_job .push .id :
190
- current_data .extend (
191
- [
192
- mappings [id ]["groups" ][group ][j ]
193
- for j in mappings [id ]["groups" ].get (group , {})
194
- ]
195
- )
196
229
197
230
# if new data changes results, update
198
231
pass_rate = len ([s for s in all_data if s == GroupStatus .OK ]) / len (all_data )
@@ -203,9 +236,9 @@ def check_and_mark_intermittent(job_id):
203
236
current_changed_groups [group ] = True
204
237
205
238
# all changed_groups need to be evaluated on previous 'failed' jobs to ensure all groups in that task are 'passing'
239
+ jobs_to_classify = [] # mark as fcid=8 (known intermittent)
240
+ jobs_to_unclassify = [] # previously parked as fcid=8, new failing data, now fcid=1
206
241
for id in mappings .keys ():
207
- jobs_to_classify = [] # mark as fcid=8 (known intermittent)
208
- jobs_to_unclassify = [] # previously parked as fcid=8, new failing data, now fcid=1
209
242
for job in mappings [id ]["jobs" ]:
210
243
all_green = True
211
244
current_all_green = True
@@ -229,19 +262,7 @@ def check_and_mark_intermittent(job_id):
229
262
elif job_classifications [job ] == 8 :
230
263
jobs_to_unclassify .append (job )
231
264
232
- # TODO: consider job.result=(busted, exception)
233
- for job in jobs_to_classify :
234
- target_job = Job .objects .filter (
235
- id = job , result = "testfailed" , failure_classification_id__in = [1 , 6 ]
236
- )
237
- if target_job :
238
- target_job .update (failure_classification_id = 8 )
239
-
240
- for job in jobs_to_unclassify :
241
- target_job = Job .objects .filter (
242
- id = job , result = "testfailed" , failure_classification_id = 8
243
- )
244
- if target_job :
245
- target_job .update (failure_classification_id = 1 )
246
-
247
- return _check_and_mark_infra (current_job , distinct_job_ids , ids )
265
+ to_classify , to_unclassify = _check_and_mark_infra (current_job , distinct_job_ids , ids )
266
+ jobs_to_classify .extend (to_classify )
267
+ jobs_to_unclassify .extend (to_unclassify )
268
+ return classify (jobs_to_classify , jobs_to_unclassify )
0 commit comments