Commit 1165309

Changes to verify_l3_cache TC to handle shared L3 Cache scenarios (#3951)

* Changes to l3_cache TC to handle multiple nodes in multiple sockets
* Changes to fix flake8 and Black errors
* Changes to simplify l3_cache TC and fix flake8
* Fixing mypy generic-any errors
1 parent acfe96b commit 1165309
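Context for the change: the previous test assumed a strict 1:1 mapping, asserting that every CPU's L3 cache id equals its NUMA node id. On VM sizes where several NUMA nodes inside one socket share a single L3 cache, that assertion fails even though the topology is valid. Below is a minimal sketch of the failure mode (not part of the commit), using hypothetical values and a stand-in CpuEntry class for the records returned by lscpu.get_cpu_info(); the field names cpu, numa_node, socket, and l3_cache mirror the ones the test reads:

# Sketch only: a hypothetical topology where the old per-CPU assertion fails.
from dataclasses import dataclass


@dataclass
class CpuEntry:
    cpu: int
    numa_node: int
    socket: int
    l3_cache: int


# Socket 0 hosts NUMA nodes 0 and 1, both backed by the same L3 cache (id 0).
cpus = [
    CpuEntry(cpu=0, numa_node=0, socket=0, l3_cache=0),
    CpuEntry(cpu=1, numa_node=1, socket=0, l3_cache=0),
]

# Old check: l3_cache must equal numa_node on every CPU -> fails for cpu 1.
print(all(c.l3_cache == c.numa_node for c in cpus))  # False

# The new code first asks whether the NUMA node ids and L3 cache ids form the
# same set; if not, it falls back to the socket-aware verification added below.
print({c.numa_node for c in cpus} == {c.l3_cache for c in cpus})  # False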

File tree

1 file changed (+124, -5 lines):
  • microsoft/testsuites/core/cpu.py


microsoft/testsuites/core/cpu.py

Lines changed: 124 additions & 5 deletions
@@ -3,6 +3,7 @@
 from __future__ import annotations
 
 import time
+from typing import Any
 
 from assertpy.assertpy import assert_that
 
@@ -117,13 +118,44 @@ def verify_l3_cache(
             self._verify_node_mapping(node, effective_numa_node_size)
             return
 
+        # For all other cases, check L3 cache mapping with socket awareness
         cpu_info = lscpu.get_cpu_info()
+
+        # Build a mapping of socket -> NUMA nodes and socket -> L3 caches
+        socket_to_numa_nodes: dict[int, set[int]] = {}
+        socket_to_l3_caches: dict[int, set[int]] = {}
+
         for cpu in cpu_info:
-            assert_that(
-                cpu.l3_cache,
-                "L3 cache of each core must be mapped to the NUMA node "
-                "associated with the core.",
-            ).is_equal_to(cpu.numa_node)
+            socket = cpu.socket
+            numa_node = cpu.numa_node
+            l3_cache = cpu.l3_cache
+
+            # Track NUMA nodes per socket
+            if socket not in socket_to_numa_nodes:
+                socket_to_numa_nodes[socket] = set()
+            socket_to_numa_nodes[socket].add(numa_node)
+
+            # Track L3 caches per socket
+            if socket not in socket_to_l3_caches:
+                socket_to_l3_caches[socket] = set()
+            socket_to_l3_caches[socket].add(l3_cache)
+
+        # Check if this is a simple 1:1 mapping (traditional case)
+        all_numa_nodes = set()
+        all_l3_caches = set()
+        for numa_nodes in socket_to_numa_nodes.values():
+            all_numa_nodes.update(numa_nodes)
+        for l3_caches in socket_to_l3_caches.values():
+            all_l3_caches.update(l3_caches)
+
+        # Check if this is a simple 1:1 mapping or socket-aware mapping
+        # If NUMA nodes and L3 caches are identical sets, use simple verification
+        if self._is_one_to_one_mapping(socket_to_numa_nodes, socket_to_l3_caches):
+            self._verify_one_to_one_mapping(cpu_info, log)
+        else:
+            self._verify_socket_aware_mapping(
+                cpu_info, socket_to_numa_nodes, socket_to_l3_caches, log
+            )
 
     @TestCaseMetadata(
         description="""
@@ -275,3 +307,90 @@ def _verify_node_mapping(self, node: Node, numa_node_size: int) -> None:
                 "L3 cache of each core must be mapped to the NUMA node "
                 "associated with the core.",
             ).is_equal_to(numa_node_id)
+
+    def _is_one_to_one_mapping(
+        self,
+        socket_to_numa_nodes: dict[int, set[int]],
+        socket_to_l3_caches: dict[int, set[int]],
+    ) -> bool:
+        """Check if NUMA nodes and L3 caches have a 1:1 mapping."""
+        all_numa_nodes = set()
+        all_l3_caches = set()
+        for numa_nodes in socket_to_numa_nodes.values():
+            all_numa_nodes.update(numa_nodes)
+        for l3_caches in socket_to_l3_caches.values():
+            all_l3_caches.update(l3_caches)
+
+        return all_numa_nodes == all_l3_caches
+
+    def _verify_one_to_one_mapping(self, cpu_info: list[Any], log: Logger) -> None:
+        """Verify traditional 1:1 mapping between NUMA nodes and L3 caches."""
+        log.debug("Detected 1:1 mapping between NUMA nodes and L3 caches")
+        for cpu in cpu_info:
+            assert_that(
+                cpu.l3_cache,
+                "L3 cache of each core must be mapped to the NUMA node "
+                "associated with the core.",
+            ).is_equal_to(cpu.numa_node)
+
+    def _verify_socket_aware_mapping(
+        self,
+        cpu_info: list[Any],
+        socket_to_numa_nodes: dict[int, set[int]],
+        socket_to_l3_caches: dict[int, set[int]],
+        log: Logger,
+    ) -> None:
+        """Verify shared L3 cache mapping within sockets."""
+        log.debug("Detected shared L3 cache within sockets")
+
+        # Verify consistency: all CPUs in same NUMA node should have same L3 cache
+        self._verify_numa_consistency(cpu_info)
+
+        # Verify isolation: L3 caches should not be shared across sockets
+        self._verify_socket_isolation(socket_to_numa_nodes, socket_to_l3_caches, log)
+
+    def _verify_numa_consistency(self, cpu_info: list[Any]) -> None:
+        """Verify all CPUs in the same NUMA node have the same L3 cache."""
+        numa_to_l3_mapping = {}
+        for cpu in cpu_info:
+            if cpu.numa_node not in numa_to_l3_mapping:
+                numa_to_l3_mapping[cpu.numa_node] = cpu.l3_cache
+            else:
+                # Verify consistency: all CPUs in same NUMA node should have same L3
+                assert_that(
+                    cpu.l3_cache,
+                    f"All CPUs in NUMA node {cpu.numa_node} should have the same "
+                    f"L3 cache mapping, expected "
+                    f"{numa_to_l3_mapping[cpu.numa_node]} "
+                    f"but found {cpu.l3_cache} for CPU {cpu.cpu}",
+                ).is_equal_to(numa_to_l3_mapping[cpu.numa_node])
+
+    def _verify_socket_isolation(
+        self,
+        socket_to_numa_nodes: dict[int, set[int]],
+        socket_to_l3_caches: dict[int, set[int]],
+        log: Logger,
+    ) -> None:
+        """Verify L3 caches are not shared across sockets."""
+        for socket, numa_nodes in socket_to_numa_nodes.items():
+            l3_caches_in_socket = socket_to_l3_caches[socket]
+
+            # Get L3 caches used by other sockets
+            other_socket_l3_caches = set()
+            for other_socket, other_l3_caches in socket_to_l3_caches.items():
+                if other_socket != socket:
+                    other_socket_l3_caches.update(other_l3_caches)
+
+            # Verify no L3 cache is shared across sockets
+            shared_l3_caches = l3_caches_in_socket.intersection(other_socket_l3_caches)
+            assert_that(
+                len(shared_l3_caches),
+                f"L3 caches should not be shared across sockets. "
+                f"Socket {socket} shares L3 cache(s) {shared_l3_caches} with "
+                f"other sockets",
+            ).is_equal_to(0)
+
+            log.debug(
+                f"Socket {socket}: NUMA nodes {sorted(numa_nodes)} use "
+                f"L3 cache(s) {sorted(l3_caches_in_socket)}"
+            )
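For reference, a standalone sketch (hypothetical two-socket data, not part of the commit) replaying the set logic the new helpers apply; the dict shapes match the socket_to_numa_nodes and socket_to_l3_caches mappings built in verify_l3_cache:

# Hypothetical topology: each socket has two NUMA nodes sharing one L3 cache.
socket_to_numa_nodes = {0: {0, 1}, 1: {2, 3}}
socket_to_l3_caches = {0: {0}, 1: {1}}

# _is_one_to_one_mapping logic: compare the unions of NUMA node ids and
# L3 cache ids across all sockets.
all_numa_nodes = set().union(*socket_to_numa_nodes.values())  # {0, 1, 2, 3}
all_l3_caches = set().union(*socket_to_l3_caches.values())    # {0, 1}
print(all_numa_nodes == all_l3_caches)  # False -> socket-aware path is taken

# _verify_socket_isolation logic: an L3 cache id seen in one socket must not
# appear in any other socket.
for socket, l3_caches in socket_to_l3_caches.items():
    other_l3_caches = set().union(
        *(v for k, v in socket_to_l3_caches.items() if k != socket)
    )
    shared = l3_caches & other_l3_caches
    assert not shared, f"socket {socket} shares L3 cache(s) {shared}"
print("no L3 cache is shared across sockets")

The committed helpers build these unions with explicit loops rather than set().union(*...); the result is the same.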
