Skip to content

Commit b327f29

Browse files
committed
Update the cpu query to collect total cpu seconds during a test
The original cpu query collected the number of CPUs in use at the moment the query was performed. The new query collects the total cpu seconds consumed for the duration of the test using the formula: total cpu seconds at the end of the test minus total cpu seconds at the beginning of the test. The total number of CPUs used can then be calculated as: total cpu seconds used during the test divided by the test duration.
1 parent 1d2a6ce commit b327f29

File tree

1 file changed

+39
-38
lines changed

1 file changed

+39
-38
lines changed

tools/run_tests/performance/prometheus.py

Lines changed: 39 additions & 38 deletions
Original file line numberDiff line numberDiff line change
@@ -39,7 +39,7 @@ def __init__(
3939
self.start = start
4040
self.end = end
4141

42-
def fetch_by_query(self, query: str) -> Any:
42+
def _fetch_by_query(self, query: str) -> Any:
4343
"""Fetches the given query with time range."""
4444
resp = requests.get(
4545
self.url + "/api/v1/query_range",
@@ -48,25 +48,25 @@ def fetch_by_query(self, query: str) -> Any:
4848
resp.raise_for_status()
4949
return resp.json()
5050

51-
def fetch_cpu_for_pod(
51+
def _fetch_cpu_for_pod(
5252
self, container_matcher: str, pod_name: str
5353
) -> Dict[str, List[float]]:
5454
"""Fetches the cpu data for each pod and construct the container
5555
name to cpu data list Dict."""
5656
query = (
57-
'irate(container_cpu_usage_seconds_total{job="kubernetes-cadvisor",pod="'
57+
'container_cpu_usage_seconds_total{job="kubernetes-cadvisor",pod="'
5858
+ pod_name
5959
+ '",container='
6060
+ container_matcher
61-
+ "}[100s])"
61+
+ "}"
6262
)
6363
logging.debug("running prometheus query for cpu:" + query)
64-
cpu_data = self.fetch_by_query(query)
64+
cpu_data = self._fetch_by_query(query)
6565
logging.debug("raw cpu data:" + str(cpu_data))
6666
cpu_container_name_to_data_list = get_data_list_from_timeseries(cpu_data)
6767
return cpu_container_name_to_data_list
6868

69-
def fetch_memory_for_pod(
69+
def _fetch_memory_for_pod(
7070
self, container_matcher: str, pod_name: str
7171
) -> Dict[str, List[float]]:
7272
"""Fetches the memory data for each pod and construct the
@@ -80,16 +80,15 @@ def fetch_memory_for_pod(
8080
)
8181

8282
logging.debug("running prometheus query for memory:" + query)
83-
memory_data = self.fetch_by_query(query)
83+
memory_data = self._fetch_by_query(query)
8484

8585
logging.debug("raw memory data:" + str(memory_data))
8686
memory_container_name_to_data_list = get_data_list_from_timeseries(memory_data)
8787

8888
return memory_container_name_to_data_list
8989

9090
def fetch_cpu_and_memory_data(
91-
self, container_list: List[str], pod_list: List[str]
92-
) -> Dict[str, Dict[str, Dict[str, Dict[str, Any]]]]:
91+
self, container_list: List[str], pod_list: List[str], test_duration_seconds: float) -> Dict[str, Dict[str, Dict[str, float]]]:
9392
"""Fetches and process min, max, mean, std for the memory and cpu
9493
data for each container in the container_list for each pod in
9594
the pod_list and construct processed data group first by metric
@@ -101,11 +100,11 @@ def fetch_cpu_and_memory_data(
101100
raw_cpu_data = {}
102101
raw_memory_data = {}
103102
for pod in pod_list:
104-
raw_cpu_data[pod] = self.fetch_cpu_for_pod(container_matcher, pod)
105-
raw_memory_data[pod] = self.fetch_memory_for_pod(container_matcher, pod)
103+
raw_cpu_data[pod] = self._fetch_cpu_for_pod(container_matcher, pod)
104+
raw_memory_data[pod] = self._fetch_memory_for_pod(container_matcher, pod)
106105

107-
processed_data["cpu"] = compute_min_max_mean_std(raw_cpu_data)
108-
processed_data["memory"] = compute_min_max_mean_std(raw_memory_data)
106+
processed_data["cpu"] = compute_total_cpu(raw_cpu_data, test_duration_seconds)
107+
processed_data["memory"] = compute_average_memory_usage(raw_memory_data)
109108
return processed_data
110109

111110

@@ -147,35 +146,34 @@ def get_data_list_from_timeseries(data: Any) -> Dict[str, List[float]]:
147146
return container_name_to_data_list
148147

149148

150-
def compute_min_max_mean_std_for_each(data: List[float]) -> Dict[str, Any]:
151-
"""Computes the min, max, mean and standard deviation for
152-
given list of data and return the processed results in a Dict
153-
keyed by min, max, mean and standard deviation."""
154-
min_value = min(data)
155-
max_value = max(data)
156-
mean_value = statistics.mean(data)
157-
std_value = statistics.pstdev(data)
158-
processed_data = {
159-
"min": min_value,
160-
"max": max_value,
161-
"mean": mean_value,
162-
"std": std_value,
163-
}
164-
return processed_data
165-
166-
167-
def compute_min_max_mean_std(
168-
cpu_data_dicts: Dict[str, Dict[str, List[float]]]
169-
) -> Dict[str, Dict[str, Dict[str, Any]]]:
149+
150+
def compute_total_cpu(
151+
cpu_data_dicts: Dict[str, Dict[str, List[float]]], test_duration_seconds: float
152+
) -> Dict[str, Dict[str,float]]:
153+
"""Computes the total cpu seconds by CPUs[end]-CPUs[start]."""
154+
pod_name_to_data_dicts = {}
155+
for pod_name, pod_data_dicts in cpu_data_dicts.items():
156+
container_name_to_processed_data = {}
157+
for container_name, data_list in pod_data_dicts.items():
158+
container_name_to_processed_data[
159+
container_name
160+
] = float(data_list[len(data_list)-1]-data_list[0]) / test_duration_seconds
161+
pod_name_to_data_dicts[pod_name] = container_name_to_processed_data
162+
163+
return pod_name_to_data_dicts
164+
165+
def compute_average_memory_usage(
166+
memory_data_dicts: Dict[str, Dict[str, List[float]]]
167+
) -> Dict[str, Dict[str, float]]:
170168
"""Computes the min, max, mean and standard deviation for
171169
given set of data."""
172170
pod_name_to_data_dicts = {}
173-
for pod_name, pod_data_dicts in cpu_data_dicts.items():
171+
for pod_name, pod_data_dicts in memory_data_dicts.items():
174172
container_name_to_processed_data = {}
175173
for container_name, data_list in pod_data_dicts.items():
176174
container_name_to_processed_data[
177175
container_name
178-
] = compute_min_max_mean_std_for_each(data_list)
176+
] = statistics.mean(data_list)
179177
pod_name_to_data_dicts[pod_name] = container_name_to_processed_data
180178

181179
return pod_name_to_data_dicts
@@ -255,15 +253,18 @@ def main() -> None:
255253

256254
with open(args.scenario_result_file, "r") as q:
257255
scenario_result = json.load(q)
256+
start_time = convert_UTC_to_epoch(scenario_result["summary"]["startTime"])
257+
end_time = convert_UTC_to_epoch(scenario_result["summary"]["endTime"])
258258
p = Prometheus(
259259
url=args.url,
260-
start=convert_UTC_to_epoch(scenario_result["summary"]["startTime"]),
261-
end=convert_UTC_to_epoch(scenario_result["summary"]["endTime"]),
260+
start=start_time,
261+
end=end_time,
262262
)
263263

264+
test_duration_sedonds = float(end_time) - float(start_time)
264265
pod_list = construct_pod_list(args.node_info_file, args.pod_type)
265266
processed_data = p.fetch_cpu_and_memory_data(
266-
container_list=args.container_name, pod_list=pod_list
267+
container_list=args.container_name, pod_list=pod_list, test_duration_seconds=test_duration_sedonds
267268
)
268269

269270
logging.debug(json.dumps(processed_data, sort_keys=True, indent=4))

0 commit comments

Comments
 (0)