Skip to content

Conversation

@michel2323
Copy link
Member

@michel2323 michel2323 commented Sep 26, 2025

  • Adding sort! and sortperm
  • Add explicit device(x) ownership in global queue calls of MKL
  • Add oneSparseMatrixCSR(rowptr, colptr, nzval, dims) constructor

@github-actions
Copy link
Contributor

github-actions bot commented Sep 26, 2025

Your PR requires formatting changes to meet the project's style guidelines.
Please consider running Runic (`git runic ms/sparse_fix`) to apply these changes.

Click here to view the suggested changes.
diff --git a/lib/mkl/wrappers_sparse.jl b/lib/mkl/wrappers_sparse.jl
index 3f8b0bb..bb17907 100644
--- a/lib/mkl/wrappers_sparse.jl
+++ b/lib/mkl/wrappers_sparse.jl
@@ -1,5 +1,5 @@
 function sparse_release_matrix_handle(A::oneAbstractSparseMatrix)
-    if A.handle !== nothing
+    return if A.handle !== nothing
         try
             queue = global_queue(context(A.nzVal), device(A.nzVal))
             handle_ptr = Ref{matrix_handle_t}(A.handle)
@@ -8,7 +8,7 @@ function sparse_release_matrix_handle(A::oneAbstractSparseMatrix)
             synchronize(queue)
         catch err
             # Don't let finalizer errors crash the program
-            @warn "Error releasing sparse matrix handle" exception=err
+            @warn "Error releasing sparse matrix handle" exception = err
         end
     end
 end
@@ -117,10 +117,10 @@ for (fname, elty, intty) in ((:onemklSsparse_set_coo_data   , :Float32   , :Int3
             queue = global_queue(context(nzVal), device(nzVal))
             if m != 0 && n != 0
                 $fname(sycl_queue(queue), handle_ptr[], m, n, nnzA, 'O', rowInd, colInd, nzVal)
-                dA = oneSparseMatrixCOO{$elty, $intty}(handle_ptr[], rowInd, colInd, nzVal, (m,n), nnzA)
+                dA = oneSparseMatrixCOO{$elty, $intty}(handle_ptr[], rowInd, colInd, nzVal, (m, n), nnzA)
                 finalizer(sparse_release_matrix_handle, dA)
             else
-                dA = oneSparseMatrixCOO{$elty, $intty}(nothing, rowInd, colInd, nzVal, (m,n), nnzA)
+                dA = oneSparseMatrixCOO{$elty, $intty}(nothing, rowInd, colInd, nzVal, (m, n), nnzA)
             end
             return dA
         end
diff --git a/src/array.jl b/src/array.jl
index d576cdb..3225b5e 100644
--- a/src/array.jl
+++ b/src/array.jl
@@ -279,8 +279,9 @@ end
 
 ## interop with GPU arrays
 
-function Base.unsafe_convert(::Type{oneDeviceArray{T,N,AS.CrossWorkgroup}}, a::oneArray{T,N}) where {T,N}
-  oneDeviceArray{T,N,AS.CrossWorkgroup}(size(a), reinterpret(LLVMPtr{T,AS.CrossWorkgroup}, pointer(a)),
+function Base.unsafe_convert(::Type{oneDeviceArray{T, N, AS.CrossWorkgroup}}, a::oneArray{T, N}) where {T, N}
+    return oneDeviceArray{T, N, AS.CrossWorkgroup}(
+        size(a), reinterpret(LLVMPtr{T, AS.CrossWorkgroup}, pointer(a)),
                                 a.maxsize - a.offset*Base.elsize(a))
 end
 
diff --git a/src/broadcast.jl b/src/broadcast.jl
index 58a30dd..f9d38f6 100644
--- a/src/broadcast.jl
+++ b/src/broadcast.jl
@@ -11,9 +11,9 @@ BroadcastStyle(W::Type{<:oneWrappedArray{T, N}}) where {T, N} =
 # when we are dealing with different buffer styles, we cannot know
 # which one is better, so use shared memory
 BroadcastStyle(
-        ::oneArrayStyle{N, B1},
-        ::oneArrayStyle{N, B2},
-    ) where {N,B1,B2} =
+    ::oneArrayStyle{N, B1},
+    ::oneArrayStyle{N, B2},
+) where {N, B1, B2} =
     oneArrayStyle{N, oneL0.SharedBuffer}()
 
 # allocation of output arrays
diff --git a/src/compiler/execution.jl b/src/compiler/execution.jl
index 6503a9b..4900e77 100644
--- a/src/compiler/execution.jl
+++ b/src/compiler/execution.jl
@@ -88,7 +88,7 @@ Adapt.adapt_storage(to::KernelAdaptor, p::ZePtr{T}) where {T} = reinterpret(Ptr{
 
 # convert oneAPI host arrays to device arrays
 Adapt.adapt_storage(::KernelAdaptor, xs::oneArray{T,N}) where {T,N} =
-  Base.unsafe_convert(oneDeviceArray{T,N,AS.CrossWorkgroup}, xs)
+    Base.unsafe_convert(oneDeviceArray{T, N, AS.CrossWorkgroup}, xs)
 
 # Base.RefValue isn't GPU compatible, so provide a compatible alternative.
 # TODO: port improvements from CUDA.jl
diff --git a/src/indexing.jl b/src/indexing.jl
index d46c67b..13d1947 100644
--- a/src/indexing.jl
+++ b/src/indexing.jl
@@ -26,9 +26,9 @@ function Base.findall(bools::oneArray{Bool})
     ys = oneArray{I}(undef, n)
 
     if n > 0
-        kernel = @oneapi launch=false _ker!(ys, bools, indices)
+        kernel = @oneapi launch = false _ker!(ys, bools, indices)
         group_size = launch_configuration(kernel)
-        kernel(ys, bools, indices; items=group_size, groups=cld(length(bools), group_size))
+        kernel(ys, bools, indices; items = group_size, groups = cld(length(bools), group_size))
     end
     # unsafe_free!(indices)
 
diff --git a/test/device/intrinsics.jl b/test/device/intrinsics.jl
index 5e5605e..e1991fd 100644
--- a/test/device/intrinsics.jl
+++ b/test/device/intrinsics.jl
@@ -226,7 +226,7 @@ end
 
         s[t] = d[t]
         s2[t] = 2*d[t]
-        barrier(0)
+                barrier(0)
         d[t] = s[tr]
 
         return
@@ -252,7 +252,7 @@ end
 
             s[t] = d[t]
             s2[t] = d[t]
-            barrier(0)
+                    barrier(0)
             d[t] = s[tr]
 
             return
diff --git a/test/execution.jl b/test/execution.jl
index cd3db01..78f87d6 100644
--- a/test/execution.jl
+++ b/test/execution.jl
@@ -307,12 +307,12 @@ end
     @oneapi kernel(arr)
     @test Array(arr)[] == 1
 
-    function kernel2(ptr)
+        function kernel2(ptr)
         ptr[] = 2
         return
     end
 
-    @oneapi kernel2(arr)
+        @oneapi kernel2(arr)
     @test Array(arr)[] == 2
 end
 

@codecov
Copy link

codecov bot commented Sep 26, 2025

Codecov Report

❌ Patch coverage is 91.50943% with 9 lines in your changes missing coverage. Please review.
✅ Project coverage is 79.21%. Comparing base (24110a8) to head (47c3ff7).

Files with missing lines Patch % Lines
lib/mkl/wrappers_sparse.jl 90.90% 5 Missing ⚠️
lib/mkl/wrappers_blas.jl 90.47% 4 Missing ⚠️
Additional details and impacted files
@@            Coverage Diff             @@
##           master     #534      +/-   ##
==========================================
+ Coverage   79.10%   79.21%   +0.10%     
==========================================
  Files          47       48       +1     
  Lines        3001     3021      +20     
==========================================
+ Hits         2374     2393      +19     
- Misses        627      628       +1     

☔ View full report in Codecov by Sentry.
📢 Have feedback on the report? Share it here.

🚀 New features to boost your workflow:
  • ❄️ Test Analytics: Detect flaky tests, report on failures, and find test suite problems.

@michel2323 michel2323 changed the base branch from master to ms/sparse_fix October 1, 2025 16:03
@michel2323 michel2323 force-pushed the ms/sparse_fix branch 2 times, most recently from 570e801 to 5c833a7 Compare October 1, 2025 16:32
@michel2323 michel2323 changed the base branch from ms/sparse_fix to master November 4, 2025 15:43
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment

Labels

None yet

Projects

None yet

Development

Successfully merging this pull request may close these issues.

3 participants