6 changes: 4 additions & 2 deletions tests/accuracy_utils.py
@@ -7,7 +7,7 @@
 
 import flag_gems
 
-from .conftest import QUICK_MODE, TO_CPU
+from .conftest import QUICK_MODE, TO_CPU, TO_DEVICE
 
 fp64_is_supported = flag_gems.runtime.device.support_fp64
 bf16_is_supported = flag_gems.runtime.device.support_bf16
@@ -178,6 +178,8 @@ def to_reference(inp, upcast=False):
     ref_inp = inp
     if TO_CPU:
         ref_inp = ref_inp.to("cpu")
+    if TO_DEVICE:
+        ref_inp = ref_inp.to(flag_gems.device)
     if upcast:
         if ref_inp.is_complex():
             ref_inp = ref_inp.to(torch.complex128)
@@ -187,7 +189,7 @@
 
 
 def to_cpu(res, ref):
-    if TO_CPU:
+    if TO_CPU or ref.device == torch.device("cpu"):
         res = res.to("cpu")
         assert ref.device == torch.device("cpu")
     return res
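
Taken together, the two helpers now support a third placement for the reference tensor: under --ref device it moves onto the native flag_gems device rather than the CPU, and to_cpu relocates the result only when the reference actually lives on the CPU. A minimal self-contained sketch of the interplay, with "cuda" standing in for flag_gems.device and torch.relu standing in for the operator under test:

import torch

TO_CPU = False    # would be set by `pytest --ref cpu`
TO_DEVICE = True  # would be set by `pytest --ref device`

def to_reference(inp):
    ref = inp
    if TO_CPU:
        ref = ref.to("cpu")   # reference computed by the CPU backend
    if TO_DEVICE:
        ref = ref.to("cuda")  # reference computed on the native device
    return ref

def to_cpu(res, ref):
    # Relocate the result only when the reference lives on the CPU.
    if TO_CPU or ref.device == torch.device("cpu"):
        res = res.to("cpu")
        assert ref.device == torch.device("cpu")
    return res

inp = torch.randn(8)                          # test inputs start on the CPU
ref_out = torch.relu(to_reference(inp))       # reference result
res_out = to_cpu(torch.relu(inp.to("cuda")), ref_out)
torch.testing.assert_close(res_out, ref_out)  # both on "cuda" in this mode
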
3 changes: 3 additions & 0 deletions tests/conftest.py
@@ -46,6 +46,9 @@ def pytest_configure(config):
     global TO_CPU
     TO_CPU = config.getoption("--ref") == "cpu"
 
+    global TO_DEVICE
+    TO_DEVICE = config.getoption("--ref") == "device"
+
     global QUICK_MODE
     QUICK_MODE = config.getoption("--mode") == "quick"
 
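
pytest_configure consumes a --ref command-line option whose registration sits outside this diff; since tests/accuracy_utils.py imports TO_DEVICE at module scope, conftest.py presumably also defines a module-level default (e.g. TO_DEVICE = False) next to TO_CPU. A hypothetical sketch of the registration hook, with the option name taken from the diff and everything else assumed:

# Hypothetical sketch; the real hook already exists in tests/conftest.py.
TO_CPU = False
TO_DEVICE = False  # module-level default so imports work before configuration

def pytest_addoption(parser):
    parser.addoption(
        "--ref",
        action="store",
        default=None,
        help='where references are computed: "cpu" uses the CPU backend, '
        '"device" keeps the reference on the flag_gems device',
    )
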
34 changes: 24 additions & 10 deletions tests/test_attention_ops.py
@@ -23,9 +23,9 @@ def make_input(
     set_philox_state(1234567890, 0, device)
     q_shape = (batch, num_head, q_seq_len, head_size)
     kv_shape = (batch, num_head_k, kv_seq_len, head_size)
-    q = torch.empty(q_shape, dtype=dtype, device=device).uniform_(-0.05, 0.05)
-    k = torch.empty(kv_shape, dtype=dtype, device=device).uniform_(-0.05, 0.05)
-    v = torch.empty(kv_shape, dtype=dtype, device=device).uniform_(-0.05, 0.05)
+    q = torch.empty(q_shape, dtype=dtype, device="cpu").uniform_(-0.05, 0.05)
+    k = torch.empty(kv_shape, dtype=dtype, device="cpu").uniform_(-0.05, 0.05)
+    v = torch.empty(kv_shape, dtype=dtype, device="cpu").uniform_(-0.05, 0.05)
     return q, k, v
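
Pinning tensor creation to the CPU makes the philox-seeded draws bit-identical no matter which backend is under test, so every run starts from the same operands; each test below then moves q, k and v onto the device explicitly. A quick self-contained illustration of the determinism argument (torch.manual_seed stands in for the set_philox_state helper above):

import torch

def make_cpu_input(shape, dtype, seed=1234567890):
    # Seed the CPU generator and draw on the CPU: the resulting bits never
    # depend on which accelerator the test will later run on.
    torch.manual_seed(seed)
    return torch.empty(shape, dtype=dtype, device="cpu").uniform_(-0.05, 0.05)

a = make_cpu_input((2, 4, 16, 8), torch.float16)
b = make_cpu_input((2, 4, 16, 8), torch.float16)
assert torch.equal(a, b)  # identical draws on every machine and backend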


@@ -313,13 +313,15 @@ def test_sdpa_legacy(
     dtype,
     enable_gqa,
 ):
+    device = torch_device_fn.current_device()
     q, k, v = make_input(
         batch, num_q_head, num_kv_head, q_seq_len, kv_seq_len, head_size, dtype, device
     )
     ref_q = to_reference(q, False)
     ref_k = to_reference(k, False)
     ref_v = to_reference(v, False)
+    q = q.to(device)
+    k = k.to(device)
+    v = v.to(device)
     scale = float(1.0 / np.sqrt(head_size))
     torch_result = torch_sdpa(
         ref_q, ref_k, ref_v, scale, is_causal, enable_gqa=enable_gqa
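
Each attention test below repeats the same updated pattern: query the current backend, build CPU inputs, take the reference copies, then move the operands under test to the device. A condensed sketch of that flow; the names come from the diff, but the body of the `with flag_gems.use_gems():` block is truncated in this view, so the scaled_dot_product_attention call shown is an assumption:

device = torch_device_fn.current_device()
q, k, v = make_input(batch, num_q_head, num_kv_head,
                     q_seq_len, kv_seq_len, head_size, dtype, device)
ref_q, ref_k, ref_v = (to_reference(t, False) for t in (q, k, v))
q, k, v = (t.to(device) for t in (q, k, v))  # operands move to the backend
scale = float(1.0 / np.sqrt(head_size))
torch_result = torch_sdpa(ref_q, ref_k, ref_v, scale, is_causal,
                          enable_gqa=enable_gqa)
with flag_gems.use_gems():  # assumed body -- the diff cuts off here
    gems_result = torch.nn.functional.scaled_dot_product_attention(
        q, k, v, scale=scale, is_causal=is_causal, enable_gqa=enable_gqa)
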
@@ -349,13 +351,15 @@ def test_sdpa_legacy(
 def test_sdpa_square_qk_even_mn(
     batch, num_head, q_seq_len, kv_seq_len, head_size, is_causal, dtype
 ):
+    device = torch_device_fn.current_device()
     q, k, v = make_input(
         batch, num_head, num_head, q_seq_len, kv_seq_len, head_size, dtype, device
     )
     ref_q = to_reference(q, False)
     ref_k = to_reference(k, False)
     ref_v = to_reference(v, False)
+    q = q.to(device)
+    k = k.to(device)
+    v = v.to(device)
     scale = float(1.0 / np.sqrt(head_size))
     torch_result = torch_sdpa(ref_q, ref_k, ref_v, scale, is_causal)
     with flag_gems.use_gems():
@@ -378,13 +382,15 @@ def test_sdpa_square_qk_even_mn(
 def test_sdpa_nonsquare_qk(
     batch, num_head, q_seq_len, kv_seq_len, head_size, is_causal, dtype
 ):
+    device = torch_device_fn.current_device()
     q, k, v = make_input(
         batch, num_head, num_head, q_seq_len, kv_seq_len, head_size, dtype, device
     )
     ref_q = to_reference(q, False)
     ref_k = to_reference(k, False)
     ref_v = to_reference(v, False)
+    q = q.to(device)
+    k = k.to(device)
+    v = v.to(device)
     scale = float(1.0 / np.sqrt(head_size))
     torch_result = torch_sdpa(ref_q, ref_k, ref_v, scale, is_causal)
     with flag_gems.use_gems():
@@ -408,13 +414,15 @@ def test_sdpa_nonsquare_qk(
 def test_flash_fwd_nonsquare_qk(
     batch, num_head, q_seq_len, kv_seq_len, head_size, is_causal, dtype
 ):
+    device = torch_device_fn.current_device()
     q, k, v = make_input(
         batch, num_head, num_head, q_seq_len, kv_seq_len, head_size, dtype, device
     )
     ref_q = to_reference(q, False)
     ref_k = to_reference(k, False)
     ref_v = to_reference(v, False)
+    q = q.to(device)
+    k = k.to(device)
+    v = v.to(device)
     scale = float(1.0 / np.sqrt(head_size))
 
     torch_out, torch_lse, _, _, _ = torch_flash_fwd(
@@ -452,13 +460,15 @@ def test_flash_fwd_gqa_alibi_softcap(
     alibi,
     dtype,
 ):
+    device = torch_device_fn.current_device()
     q, k, v = make_input(
         batch, num_head, num_head_k, q_seq_len, kv_seq_len, head_size, dtype, device
     )
     ref_q = to_reference(q, False)
     ref_k = to_reference(k, False)
     ref_v = to_reference(v, False)
+    q = q.to(device)
+    k = k.to(device)
+    v = v.to(device)
     scale = float(1.0 / np.sqrt(head_size))
 
     if alibi:
@@ -528,13 +538,15 @@ def test_flash_splitkv(
     alibi,
     dtype,
 ):
+    device = torch_device_fn.current_device()
     q, k, v = make_input(
         batch, num_head, num_head_k, q_seq_len, kv_seq_len, head_size, dtype, device
     )
     ref_q = to_reference(q, False)
     ref_k = to_reference(k, False)
     ref_v = to_reference(v, False)
+    q = q.to(device)
+    k = k.to(device)
+    v = v.to(device)
     scale = float(1.0 / np.sqrt(head_size))
 
     if alibi:
@@ -604,13 +616,15 @@ def test_flash_fwd_swa(
     window_size_right,
     dtype,
 ):
+    device = torch_device_fn.current_device()
     q, k, v = make_input(
         batch, num_head, num_head, q_seq_len, kv_seq_len, head_size, dtype, device
     )
     ref_q = to_reference(q, False)
     ref_k = to_reference(k, False)
     ref_v = to_reference(v, False)
+    q = q.to(device)
+    k = k.to(device)
+    v = v.to(device)
     scale = float(1.0 / np.sqrt(head_size))
 
     torch_out, torch_lse, _, _, _ = torch_flash_fwd(
Expand Down
Loading