@@ -133,8 +133,8 @@ def test_large_sparse_array(self):
 
 
 @pytest.mark.skipif(not has_minkowskiengine, reason="MinkowskiEngine not installed")
-class TestMinkowskiEngineReal:
-    def test_torch_sparse_to_minkowski_real(self):
+class TestMinkowskiEngine:
+    def test_torch_sparse_to_minkowski(self):
         # Create sparse tensor with 3D coordinates (batch, x, y) and features
         indices = torch.tensor(
             [[0, 0, 1, 1], [0, 1, 0, 1], [0, 0, 1, 1], [3, 4, 5, 6]], dtype=torch.int
@@ -145,12 +145,11 @@ def test_torch_sparse_to_minkowski_real(self):
 
         result = torch_sparse_to_minkowski(tensor)
 
-        assert hasattr(result, "F")  # Features
-        assert hasattr(result, "C")  # Coordinates
+        assert isinstance(result, ME.SparseTensor)
         assert result.F.shape[0] == 4  # Number of points
         assert result.F.shape[1] == 1  # Feature dimension
 
-    def test_minkowski_to_torch_sparse_real(self):
+    def test_minkowski_to_torch_sparse(self):
         # Create a MinkowskiEngine SparseTensor
         coordinates = torch.tensor(
             [[0, 0, 0], [0, 1, 0], [1, 0, 1], [1, 1, 1]], dtype=torch.int
@@ -164,23 +163,53 @@ def test_minkowski_to_torch_sparse_real(self):
         assert result.is_sparse
         assert result.values().numel() == 4
 
-    def test_roundtrip_minkowski(self):
-        # Test roundtrip conversion
+    def test_already_torch_sparse(self):
         indices = torch.tensor(
-            [[0, 0, 1], [0, 1, 0], [0, 1, 1], [2, 3, 4]], dtype=torch.int
+            [[0, 0, 1, 1], [0, 1, 0, 1], [0, 0, 1, 1], [3, 4, 5, 6]], dtype=torch.int
         )
+        values = torch.tensor([1.0, 2.0, 3.0, 4.0])
+        shape = (2, 2, 2, 10)  # batch_size=2, spatial=(2,2), features=10
+        tensor = torch.sparse_coo_tensor(indices, values, shape).coalesce()
+
+        result = minkowski_to_torch_sparse(tensor)
+
+        assert result is tensor
+
+    def test_roundtrip_minkowski(self):
+        # Test roundtrip conversion
+        indices = torch.tensor([[0, 0, 1], [0, 1, 0], [1, 1, 1]], dtype=torch.int).T
         values = torch.tensor([1.0, 2.0, 3.0])
-        shape = (2, 2, 2, 5)
+        shape = (2, 2, 2)
         tensor = torch.sparse_coo_tensor(indices, values, shape).coalesce()
 
         me_tensor = torch_sparse_to_minkowski(tensor)
         back_to_torch = minkowski_to_torch_sparse(
-            me_tensor, full_scale_spatial_shape=[2, 2]
+            me_tensor, full_scale_spatial_shape=[2, 2], squeeze=True
         )
 
-        # Check that we get back similar structure (may not be identical due to coordinate handling)
         assert back_to_torch.is_sparse
-        assert back_to_torch.shape[0] == 2  # batch size preserved
+        assert back_to_torch.shape[0] == 2
+        assert torch.equal(back_to_torch.indices(), tensor.indices())
+        assert torch.equal(back_to_torch.values(), tensor.values())
+
+        # Test with tensor spatial shape
+        back_to_torch_2 = minkowski_to_torch_sparse(
+            me_tensor, full_scale_spatial_shape=torch.tensor([2, 2]), squeeze=True
+        )
+
+        assert torch.equal(back_to_torch.indices(), back_to_torch_2.indices())
+        assert torch.equal(back_to_torch.values(), back_to_torch_2.values())
+
+    def test_squeeze_error(self):
+        # Test trying to squeeze without scalar features
+        coordinates = torch.tensor(
+            [[0, 0, 0], [0, 1, 0], [1, 0, 1], [1, 1, 1]], dtype=torch.int
+        )
+        features = torch.tensor([[1.0, 1.0], [2.0, 2.0], [3.0, 3.0], [4.0, 4.0]])
+        sparse_tensor = ME.SparseTensor(features, coordinates)
+
+        with pytest.raises(ValueError, match="Got `squeeze`=True"):
+            _ = minkowski_to_torch_sparse(sparse_tensor, squeeze=True)
 
 
 @pytest.mark.skipif(not has_spconv, reason="spconv not installed")
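For orientation, the conversion flow that the updated MinkowskiEngine roundtrip test exercises can be sketched standalone as below. This is a minimal sketch, not documented usage: the import path is hypothetical (the diff only shows the converter names), and the meaning of `full_scale_spatial_shape` and `squeeze` is inferred from the assertions above; an ME.SparseTensor does not carry the dense spatial extent, so the caller supplies it, and `squeeze=True` collapses the singleton feature channel so the roundtrip reproduces the original scalar-valued tensor.

import torch

# Hypothetical import; the diff does not show which module defines the converters.
# from <package>.sparse_conversion import torch_sparse_to_minkowski, minkowski_to_torch_sparse

# Scalar-valued sparse tensor over (batch, x, y), mirroring the roundtrip fixture.
indices = torch.tensor([[0, 0, 1], [0, 1, 0], [1, 1, 1]], dtype=torch.int).T
values = torch.tensor([1.0, 2.0, 3.0])
tensor = torch.sparse_coo_tensor(indices, values, (2, 2, 2)).coalesce()

me_tensor = torch_sparse_to_minkowski(tensor)  # ME.SparseTensor (features F, coordinates C)
back = minkowski_to_torch_sparse(
    me_tensor,
    full_scale_spatial_shape=[2, 2],  # dense spatial extent, supplied by the caller
    squeeze=True,                     # drop the trailing singleton feature channel
)
assert torch.equal(back.indices(), tensor.indices())
assert torch.equal(back.values(), tensor.values())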
@@ -244,7 +273,6 @@ def test_spconv_squeeze_error(self):
         with pytest.raises(ValueError, match="Got `squeeze`=True, but"):
             _ = spconv_to_torch_sparse(sparse_conv_tensor, squeeze=True)
 
-
     def test_roundtrip_spconv(self):
         # Test roundtrip conversion
         indices = torch.tensor([[0, 0, 1], [0, 1, 0], [0, 1, 1], [0, 0, 1]]).T
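The spconv tests assert the same "Got `squeeze`=True" error prefix as the new MinkowskiEngine test_squeeze_error, which suggests both converters share one squeeze contract: squeezing is only allowed when each point carries a single feature channel. A rough sketch of that check follows, using a hypothetical helper name (_validate_squeeze does not appear in the diff); the real converters' internals are not shown here.

import torch

def _validate_squeeze(features: torch.Tensor) -> None:
    # Hypothetical helper sketching the check implied by the asserted error message.
    # ME / spconv features are (num_points, channels); a lossless squeeze needs
    # exactly one channel.
    if features.shape[-1] != 1:
        raise ValueError(
            "Got `squeeze`=True, but features have shape "
            f"{tuple(features.shape)}; squeezing requires a single feature channel."
        )

# Example: (4, 2) features, as in test_squeeze_error above, would raise here.
# _validate_squeeze(torch.ones(4, 2))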