@@ -39,7 +39,7 @@ def __init__(
39
39
self .start = start
40
40
self .end = end
41
41
42
- def fetch_by_query (self , query : str ) -> Any :
42
+ def _fetch_by_query (self , query : str ) -> Any :
43
43
"""Fetches the given query with time range."""
44
44
resp = requests .get (
45
45
self .url + "/api/v1/query_range" ,
@@ -48,25 +48,25 @@ def fetch_by_query(self, query: str) -> Any:
48
48
resp .raise_for_status ()
49
49
return resp .json ()
50
50
51
- def fetch_cpu_for_pod (
51
+ def _fetch_cpu_for_pod (
52
52
self , container_matcher : str , pod_name : str
53
53
) -> Dict [str , List [float ]]:
54
54
"""Fetches the cpu data for each pod and construct the container
55
55
name to cpu data list Dict."""
56
56
query = (
57
- 'irate( container_cpu_usage_seconds_total{job="kubernetes-cadvisor",pod="'
57
+ 'container_cpu_usage_seconds_total{job="kubernetes-cadvisor",pod="'
58
58
+ pod_name
59
59
+ '",container='
60
60
+ container_matcher
61
- + "}[100s]) "
61
+ + "}"
62
62
)
63
63
logging .debug ("running prometheus query for cpu:" + query )
64
- cpu_data = self .fetch_by_query (query )
64
+ cpu_data = self ._fetch_by_query (query )
65
65
logging .debug ("raw cpu data:" + str (cpu_data ))
66
66
cpu_container_name_to_data_list = get_data_list_from_timeseries (cpu_data )
67
67
return cpu_container_name_to_data_list
68
68
69
- def fetch_memory_for_pod (
69
+ def _fetch_memory_for_pod (
70
70
self , container_matcher : str , pod_name : str
71
71
) -> Dict [str , List [float ]]:
72
72
"""Fetches the memory data for each pod and construct the
@@ -80,16 +80,15 @@ def fetch_memory_for_pod(
80
80
)
81
81
82
82
logging .debug ("running prometheus query for memory:" + query )
83
- memory_data = self .fetch_by_query (query )
83
+ memory_data = self ._fetch_by_query (query )
84
84
85
85
logging .debug ("raw memory data:" + str (memory_data ))
86
86
memory_container_name_to_data_list = get_data_list_from_timeseries (memory_data )
87
87
88
88
return memory_container_name_to_data_list
89
89
90
90
def fetch_cpu_and_memory_data (
91
- self , container_list : List [str ], pod_list : List [str ]
92
- ) -> Dict [str , Dict [str , Dict [str , Dict [str , Any ]]]]:
91
+ self , container_list : List [str ], pod_list : List [str ], test_duration_seconds : float ) -> Dict [str , Dict [str , Dict [str , float ]]]:
93
92
"""Fetches and process min, max, mean, std for the memory and cpu
94
93
data for each container in the container_list for each pod in
95
94
the pod_list and construct processed data group first by metric
@@ -101,11 +100,11 @@ def fetch_cpu_and_memory_data(
101
100
raw_cpu_data = {}
102
101
raw_memory_data = {}
103
102
for pod in pod_list :
104
- raw_cpu_data [pod ] = self .fetch_cpu_for_pod (container_matcher , pod )
105
- raw_memory_data [pod ] = self .fetch_memory_for_pod (container_matcher , pod )
103
+ raw_cpu_data [pod ] = self ._fetch_cpu_for_pod (container_matcher , pod )
104
+ raw_memory_data [pod ] = self ._fetch_memory_for_pod (container_matcher , pod )
106
105
107
- processed_data ["cpu" ] = compute_min_max_mean_std (raw_cpu_data )
108
- processed_data ["memory" ] = compute_min_max_mean_std (raw_memory_data )
106
+ processed_data ["cpu" ] = compute_total_cpu (raw_cpu_data , test_duration_seconds )
107
+ processed_data ["memory" ] = compute_average_memory_usage (raw_memory_data )
109
108
return processed_data
110
109
111
110
@@ -147,35 +146,34 @@ def get_data_list_from_timeseries(data: Any) -> Dict[str, List[float]]:
147
146
return container_name_to_data_list
148
147
149
148
150
- def compute_min_max_mean_std_for_each (data : List [float ]) -> Dict [str , Any ]:
151
- """Computes the min, max, mean and standard deviation for
152
- given list of data and return the processed results in a Dict
153
- keyed by min, max, mean and standard deviation."""
154
- min_value = min (data )
155
- max_value = max (data )
156
- mean_value = statistics .mean (data )
157
- std_value = statistics .pstdev (data )
158
- processed_data = {
159
- "min" : min_value ,
160
- "max" : max_value ,
161
- "mean" : mean_value ,
162
- "std" : std_value ,
163
- }
164
- return processed_data
165
-
166
-
167
- def compute_min_max_mean_std (
168
- cpu_data_dicts : Dict [str , Dict [str , List [float ]]]
169
- ) -> Dict [str , Dict [str , Dict [str , Any ]]]:
149
+
150
+ def compute_total_cpu (
151
+ cpu_data_dicts : Dict [str , Dict [str , List [float ]]], test_duration_seconds : float
152
+ ) -> Dict [str , Dict [str ,float ]]:
153
+ """Computes the total cpu seconds by CPUs[end]-CPUs[start]."""
154
+ pod_name_to_data_dicts = {}
155
+ for pod_name , pod_data_dicts in cpu_data_dicts .items ():
156
+ container_name_to_processed_data = {}
157
+ for container_name , data_list in pod_data_dicts .items ():
158
+ container_name_to_processed_data [
159
+ container_name
160
+ ] = float (data_list [len (data_list )- 1 ]- data_list [0 ]) / test_duration_seconds
161
+ pod_name_to_data_dicts [pod_name ] = container_name_to_processed_data
162
+
163
+ return pod_name_to_data_dicts
164
+
165
+ def compute_average_memory_usage (
166
+ memory_data_dicts : Dict [str , Dict [str , List [float ]]]
167
+ ) -> Dict [str , Dict [str , float ]]:
170
168
"""Computes the min, max, mean and standard deviation for
171
169
given set of data."""
172
170
pod_name_to_data_dicts = {}
173
- for pod_name , pod_data_dicts in cpu_data_dicts .items ():
171
+ for pod_name , pod_data_dicts in memory_data_dicts .items ():
174
172
container_name_to_processed_data = {}
175
173
for container_name , data_list in pod_data_dicts .items ():
176
174
container_name_to_processed_data [
177
175
container_name
178
- ] = compute_min_max_mean_std_for_each (data_list )
176
+ ] = statistics . mean (data_list )
179
177
pod_name_to_data_dicts [pod_name ] = container_name_to_processed_data
180
178
181
179
return pod_name_to_data_dicts
@@ -255,15 +253,18 @@ def main() -> None:
255
253
256
254
with open (args .scenario_result_file , "r" ) as q :
257
255
scenario_result = json .load (q )
256
+ start_time = convert_UTC_to_epoch (scenario_result ["summary" ]["startTime" ])
257
+ end_time = convert_UTC_to_epoch (scenario_result ["summary" ]["endTime" ])
258
258
p = Prometheus (
259
259
url = args .url ,
260
- start = convert_UTC_to_epoch ( scenario_result [ "summary" ][ "startTime" ]) ,
261
- end = convert_UTC_to_epoch ( scenario_result [ "summary" ][ "endTime" ]) ,
260
+ start = start_time ,
261
+ end = end_time ,
262
262
)
263
263
264
+ test_duration_seconds = float (end_time ) - float (start_time )
264
265
pod_list = construct_pod_list (args .node_info_file , args .pod_type )
265
266
processed_data = p .fetch_cpu_and_memory_data (
266
- container_list = args .container_name , pod_list = pod_list
267
+ container_list = args .container_name , pod_list = pod_list , test_duration_seconds = test_duration_seconds
267
268
)
268
269
269
270
logging .debug (json .dumps (processed_data , sort_keys = True , indent = 4 ))
0 commit comments