|
36 | 36 | }
|
37 | 37 | }
|
38 | 38 |
|
| 39 | +%typemap(out) std::pair<void*, size_t> { |
| 40 | + $result = PyBytes_FromStringAndSize((char* const)$1.first, $1.second); |
| 41 | +} |
| 42 | + |
39 | 43 | %extend abstract_voxel_storage {
|
40 | 44 | std::vector<abstract_voxel_storage*> components() const {
|
41 | 45 | std::vector<abstract_voxel_storage*> comps;
|
|
88 | 92 | throw std::runtime_error("Unsupported data type, size: " + std::to_string($self->value_bits()));
|
89 | 93 | }
|
90 | 94 | }
|
| 95 | + std::pair<void*, size_t> get_domain_buffer() const { |
| 96 | + auto acvs_voxels = dynamic_cast<abstract_chunked_voxel_storage const*>($self); |
| 97 | + if (!acvs_voxels) { |
| 98 | + throw std::runtime_error("Unsupported"); |
| 99 | + } |
| 100 | + auto offs = acvs_voxels->grid_offset() * acvs_voxels->chunk_size(); |
| 101 | + size_t oi, oj, ok; |
| 102 | + offs.tie(oi, oj, ok); |
| 103 | + auto a = $self->bounds(); |
| 104 | + auto extents = a[1] - a[0] + 1; |
| 105 | + auto num_elements = extents.prod(); |
| 106 | + |
| 107 | + size_t elem_size_bytes; |
| 108 | + if ($self->value_bits() == 1) { |
| 109 | + elem_size_bytes = 1; |
| 110 | + } else if ($self->value_bits() == 8) { |
| 111 | + elem_size_bytes = 1; |
| 112 | + } else if ($self->value_bits() == 32) { |
| 113 | + elem_size_bytes = 4; |
| 114 | + } else if ($self->value_bits() == sizeof(normal_and_curvature<int16_t>) * 8) { |
| 115 | + elem_size_bytes = 16; |
| 116 | + } else { |
| 117 | + throw std::runtime_error("Unsupported data type, size: " + std::to_string($self->value_bits())); |
| 118 | + } |
| 119 | + |
| 120 | + // @nb we choose float as that has the highest alignment requirements most likely |
| 121 | + void* data = new float[num_elements * elem_size_bytes / sizeof(float)]; |
| 122 | + |
| 123 | + size_t index = 0; |
| 124 | + BEGIN_LOOP_I2(a[0], a[1]) |
| 125 | + if (self->value_bits() == 1) { |
| 126 | + ((uint8_t*) (data))[index++] = self->Get(ijk) ? 1 : 0; |
| 127 | + } else if (self->value_bits() == 8) { |
| 128 | + uint8_t v; |
| 129 | + self->Get(ijk, &v); |
| 130 | + ((uint8_t*) (data))[index++] = v; |
| 131 | + } else if (self->value_bits() == 32) { |
| 132 | + uint32_t v; |
| 133 | + self->Get(ijk, &v); |
| 134 | + ((uint32_t*) (data))[index++] = v; |
| 135 | + } else if (self->value_bits() == sizeof(normal_and_curvature<int16_t>) * 8) { |
| 136 | + normal_and_curvature_t::storage_type v; |
| 137 | + self->Get(ijk, &v); |
| 138 | + auto vf = v.convert<float>(); |
| 139 | + for (size_t l = 0; l < 4; ++l) { |
| 140 | + ((float*)(data))[index++] = vf.nxyz_curv[l]; |
| 141 | + } |
| 142 | + } |
| 143 | + END_LOOP; |
| 144 | + return { data, num_elements * elem_size_bytes }; |
| 145 | + } |
	// Returns the voxel values inside bounds() as a nested Python tuple of
	// shape (extent_i, extent_j, extent_k), i-major. Values are fetched
	// through the SWIG-generated scalar getter, so element conversion matches
	// the single-voxel get() exposed to Python.
	PyObject* get_domain() const {
		// Only chunked storages carry the grid offset needed below.
		auto acvs_voxels = dynamic_cast<abstract_chunked_voxel_storage const*>($self);
		if (!acvs_voxels) {
			throw std::runtime_error("Unsupported");
		}
		// Storage origin in voxel units; adding bounds()[0] yields the global
		// coordinate of the first voxel we emit.
		auto offs = acvs_voxels->grid_offset() * acvs_voxels->chunk_size();
		auto a = self->bounds();
		long oi, oj, ok;
		(offs + a[0].as<long>()).tie(oi, oj, ok);
		auto extents = a[1] - a[0] + 1;
		PyObject* idim = PyTuple_New(extents.get<0>());
		for (long i = 0; i < extents.get<0>(); ++i) {
			PyObject* jdim = PyTuple_New(extents.get<1>());
			// PyTuple_SetItem steals the reference to the inner tuple, so no
			// explicit Py_DECREF is needed for jdim/kdim.
			PyTuple_SetItem(idim, i, jdim);
			for (long j = 0; j < extents.get<1>(); ++j) {
				PyObject* kdim = PyTuple_New(extents.get<2>());
				PyTuple_SetItem(jdim, j, kdim);
				for (long k = 0; k < extents.get<2>(); ++k) {
					// abstract_voxel_storage_get__SWIG_0 is the
					// SWIG-generated wrapper for the 3-index get(); it
					// returns a new reference, which SetItem steals.
					PyTuple_SetItem(kdim, k, abstract_voxel_storage_get__SWIG_0($self, i + oi, j + oj, k + ok));
				}
			}
		}
		return idim;
	}
91 | 170 | bool set(long i, long j, long k, PyObject* v) {
|
92 | 171 | auto acvs_voxels = dynamic_cast<abstract_chunked_voxel_storage const*>($self);
|
93 | 172 | if (!acvs_voxels) {
|
|
0 commit comments