// automatically generated by the FlatBuffers compiler, do not modify


#ifndef FLATBUFFERS_GENERATED_SPARSETENSOR_ORG_APACHE_ARROW_FLATBUF_H_
#define FLATBUFFERS_GENERATED_SPARSETENSOR_ORG_APACHE_ARROW_FLATBUF_H_

#include "flatbuffers/flatbuffers.h"

#include "Schema_generated.h"
#include "Tensor_generated.h"

namespace org {
namespace apache {
namespace arrow {
namespace flatbuf {

struct SparseTensorIndexCOO;

struct SparseMatrixIndexCSR;

struct SparseTensor;

enum SparseTensorIndex {
  SparseTensorIndex_NONE = 0,
  SparseTensorIndex_SparseTensorIndexCOO = 1,
  SparseTensorIndex_SparseMatrixIndexCSR = 2,
  SparseTensorIndex_MIN = SparseTensorIndex_NONE,
  SparseTensorIndex_MAX = SparseTensorIndex_SparseMatrixIndexCSR
};

inline const SparseTensorIndex (&EnumValuesSparseTensorIndex())[3] {
  static const SparseTensorIndex values[] = {
    SparseTensorIndex_NONE,
    SparseTensorIndex_SparseTensorIndexCOO,
    SparseTensorIndex_SparseMatrixIndexCSR
  };
  return values;
}

inline const char * const *EnumNamesSparseTensorIndex() {
  static const char * const names[] = {
    "NONE",
    "SparseTensorIndexCOO",
    "SparseMatrixIndexCSR",
    nullptr
  };
  return names;
}

inline const char *EnumNameSparseTensorIndex(SparseTensorIndex e) {
  if (e < SparseTensorIndex_NONE || e > SparseTensorIndex_SparseMatrixIndexCSR) return "";
  const size_t index = static_cast<size_t>(e);
  return EnumNamesSparseTensorIndex()[index];
}
template<typename T> struct SparseTensorIndexTraits {
  static const SparseTensorIndex enum_value = SparseTensorIndex_NONE;
};

template<> struct SparseTensorIndexTraits<SparseTensorIndexCOO> {
  static const SparseTensorIndex enum_value = SparseTensorIndex_SparseTensorIndexCOO;
};

template<> struct SparseTensorIndexTraits<SparseMatrixIndexCSR> {
  static const SparseTensorIndex enum_value = SparseTensorIndex_SparseMatrixIndexCSR;
};
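
// --- Editor's usage sketch (not generated code) -----------------------------
// The traits template above maps a union member type to its enum tag at
// compile time, e.g.:
//
//   static_assert(SparseTensorIndexTraits<SparseTensorIndexCOO>::enum_value ==
//                     SparseTensorIndex_SparseTensorIndexCOO,
//                 "traits map a union member type to its tag");
// -----------------------------------------------------------------------------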

bool VerifySparseTensorIndex(flatbuffers::Verifier &verifier, const void *obj, SparseTensorIndex type);
bool VerifySparseTensorIndexVector(flatbuffers::Verifier &verifier, const flatbuffers::Vector<flatbuffers::Offset<void>> *values, const flatbuffers::Vector<uint8_t> *types);

/// ----------------------------------------------------------------------
/// EXPERIMENTAL: Data structures for sparse tensors
/// Coordinate (COO) format of sparse tensor index.
///
/// COO's index list is represented as an NxM matrix,
/// where N is the number of non-zero values,
/// and M is the number of dimensions of a sparse tensor.
///
/// indicesBuffer stores the location and size of the data of this indices
/// matrix. The value type and the strides of the indices matrix are
/// specified in the indicesType and indicesStrides fields.
///
/// For example, let X be a 2x3x4x5 tensor with the following
/// 6 non-zero values:
///
///   X[0, 1, 2, 0] := 1
///   X[1, 1, 2, 3] := 2
///   X[0, 2, 1, 0] := 3
///   X[0, 1, 3, 0] := 4
///   X[0, 1, 2, 1] := 5
///   X[1, 2, 0, 4] := 6
///
/// In COO format, the index matrix of X is the following 6x4 matrix,
/// with one row per non-zero value and one column per dimension:
///
///   [[0, 1, 2, 0],
///    [0, 1, 2, 1],
///    [0, 1, 3, 0],
///    [0, 2, 1, 0],
///    [1, 1, 2, 3],
///    [1, 2, 0, 4]]
///
/// Note that the indices are sorted in lexicographical order.
struct SparseTensorIndexCOO FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
    VT_INDICESTYPE = 4,
    VT_INDICESSTRIDES = 6,
    VT_INDICESBUFFER = 8
  };
  /// The type of values in indicesBuffer
  const Int *indicesType() const {
    return GetPointer<const Int *>(VT_INDICESTYPE);
  }
  /// Non-negative byte offsets to advance one value cell along each dimension
  const flatbuffers::Vector<int64_t> *indicesStrides() const {
    return GetPointer<const flatbuffers::Vector<int64_t> *>(VT_INDICESSTRIDES);
  }
  /// The location and size of the indices matrix's data
  const Buffer *indicesBuffer() const {
    return GetStruct<const Buffer *>(VT_INDICESBUFFER);
  }
  bool Verify(flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           VerifyOffset(verifier, VT_INDICESTYPE) &&
           verifier.VerifyTable(indicesType()) &&
           VerifyOffset(verifier, VT_INDICESSTRIDES) &&
           verifier.VerifyVector(indicesStrides()) &&
           VerifyField<Buffer>(verifier, VT_INDICESBUFFER) &&
           verifier.EndTable();
  }
};

struct SparseTensorIndexCOOBuilder {
  flatbuffers::FlatBufferBuilder &fbb_;
  flatbuffers::uoffset_t start_;
  void add_indicesType(flatbuffers::Offset<Int> indicesType) {
    fbb_.AddOffset(SparseTensorIndexCOO::VT_INDICESTYPE, indicesType);
  }
  void add_indicesStrides(flatbuffers::Offset<flatbuffers::Vector<int64_t>> indicesStrides) {
    fbb_.AddOffset(SparseTensorIndexCOO::VT_INDICESSTRIDES, indicesStrides);
  }
  void add_indicesBuffer(const Buffer *indicesBuffer) {
    fbb_.AddStruct(SparseTensorIndexCOO::VT_INDICESBUFFER, indicesBuffer);
  }
  explicit SparseTensorIndexCOOBuilder(flatbuffers::FlatBufferBuilder &_fbb)
        : fbb_(_fbb) {
    start_ = fbb_.StartTable();
  }
  SparseTensorIndexCOOBuilder &operator=(const SparseTensorIndexCOOBuilder &);
  flatbuffers::Offset<SparseTensorIndexCOO> Finish() {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<SparseTensorIndexCOO>(end);
    return o;
  }
};

inline flatbuffers::Offset<SparseTensorIndexCOO> CreateSparseTensorIndexCOO(
    flatbuffers::FlatBufferBuilder &_fbb,
    flatbuffers::Offset<Int> indicesType = 0,
    flatbuffers::Offset<flatbuffers::Vector<int64_t>> indicesStrides = 0,
    const Buffer *indicesBuffer = 0) {
  SparseTensorIndexCOOBuilder builder_(_fbb);
  builder_.add_indicesBuffer(indicesBuffer);
  builder_.add_indicesStrides(indicesStrides);
  builder_.add_indicesType(indicesType);
  return builder_.Finish();
}

inline flatbuffers::Offset<SparseTensorIndexCOO> CreateSparseTensorIndexCOODirect(
    flatbuffers::FlatBufferBuilder &_fbb,
    flatbuffers::Offset<Int> indicesType = 0,
    const std::vector<int64_t> *indicesStrides = nullptr,
    const Buffer *indicesBuffer = 0) {
  auto indicesStrides__ = indicesStrides ? _fbb.CreateVector<int64_t>(*indicesStrides) : 0;
  return org::apache::arrow::flatbuf::CreateSparseTensorIndexCOO(
      _fbb,
      indicesType,
      indicesStrides__,
      indicesBuffer);
}

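// --- Editor's usage sketch (not generated code) -----------------------------
// A minimal example of describing the 6x4 COO index matrix documented above,
// assuming its int64 values have already been written out-of-band at byte
// offset 0 with length 192 (6 rows x 4 columns x 8 bytes). The offset,
// length, and strides are illustrative only.
//
//   flatbuffers::FlatBufferBuilder fbb;
//   auto indicesType = CreateInt(fbb, /*bitWidth=*/64, /*is_signed=*/true);
//   std::vector<int64_t> strides = {32, 8};  // row-major byte strides
//   Buffer indicesBuffer(/*offset=*/0, /*length=*/192);
//   auto coo = CreateSparseTensorIndexCOODirect(
//       fbb, indicesType, &strides, &indicesBuffer);
// -----------------------------------------------------------------------------
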
/// Compressed Sparse Row format, which is matrix-specific.
struct SparseMatrixIndexCSR FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
    VT_INDPTRTYPE = 4,
    VT_INDPTRBUFFER = 6,
    VT_INDICESTYPE = 8,
    VT_INDICESBUFFER = 10
  };
  /// The type of values in indptrBuffer
  const Int *indptrType() const {
    return GetPointer<const Int *>(VT_INDPTRTYPE);
  }
  /// indptrBuffer stores the location and size of the indptr array that
  /// represents the range of the rows.
  /// The i-th row spans from indptr[i] to indptr[i+1] in the data.
  /// The length of this array is 1 + (the number of rows), and the type
  /// of the index values is long.
  ///
  /// For example, let X be the following 6x4 matrix:
  ///
  ///   X := [[0, 1, 2, 0],
  ///         [0, 0, 3, 0],
  ///         [0, 4, 0, 5],
  ///         [0, 0, 0, 0],
  ///         [6, 0, 7, 8],
  ///         [0, 9, 0, 0]].
  ///
  /// The array of non-zero values in X is:
  ///
  ///   values(X) = [1, 2, 3, 4, 5, 6, 7, 8, 9].
  ///
  /// And the indptr of X is:
  ///
  ///   indptr(X) = [0, 2, 3, 5, 5, 8, 9].
  const Buffer *indptrBuffer() const {
    return GetStruct<const Buffer *>(VT_INDPTRBUFFER);
  }
  /// The type of values in indicesBuffer
  const Int *indicesType() const {
    return GetPointer<const Int *>(VT_INDICESTYPE);
  }
  /// indicesBuffer stores the location and size of the array that
  /// contains the column indices of the corresponding non-zero values.
  /// The type of the index values is long.
  ///
  /// For example, the indices of the above X are:
  ///
  ///   indices(X) = [1, 2, 2, 1, 3, 0, 2, 3, 1].
  ///
  /// Note that the indices are sorted in lexicographical order for each row.
  const Buffer *indicesBuffer() const {
    return GetStruct<const Buffer *>(VT_INDICESBUFFER);
  }
  bool Verify(flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           VerifyOffset(verifier, VT_INDPTRTYPE) &&
           verifier.VerifyTable(indptrType()) &&
           VerifyField<Buffer>(verifier, VT_INDPTRBUFFER) &&
           VerifyOffset(verifier, VT_INDICESTYPE) &&
           verifier.VerifyTable(indicesType()) &&
           VerifyField<Buffer>(verifier, VT_INDICESBUFFER) &&
           verifier.EndTable();
  }
};

struct SparseMatrixIndexCSRBuilder {
  flatbuffers::FlatBufferBuilder &fbb_;
  flatbuffers::uoffset_t start_;
  void add_indptrType(flatbuffers::Offset<Int> indptrType) {
    fbb_.AddOffset(SparseMatrixIndexCSR::VT_INDPTRTYPE, indptrType);
  }
  void add_indptrBuffer(const Buffer *indptrBuffer) {
    fbb_.AddStruct(SparseMatrixIndexCSR::VT_INDPTRBUFFER, indptrBuffer);
  }
  void add_indicesType(flatbuffers::Offset<Int> indicesType) {
    fbb_.AddOffset(SparseMatrixIndexCSR::VT_INDICESTYPE, indicesType);
  }
  void add_indicesBuffer(const Buffer *indicesBuffer) {
    fbb_.AddStruct(SparseMatrixIndexCSR::VT_INDICESBUFFER, indicesBuffer);
  }
  explicit SparseMatrixIndexCSRBuilder(flatbuffers::FlatBufferBuilder &_fbb)
        : fbb_(_fbb) {
    start_ = fbb_.StartTable();
  }
  SparseMatrixIndexCSRBuilder &operator=(const SparseMatrixIndexCSRBuilder &);
  flatbuffers::Offset<SparseMatrixIndexCSR> Finish() {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<SparseMatrixIndexCSR>(end);
    return o;
  }
};

inline flatbuffers::Offset<SparseMatrixIndexCSR> CreateSparseMatrixIndexCSR(
    flatbuffers::FlatBufferBuilder &_fbb,
    flatbuffers::Offset<Int> indptrType = 0,
    const Buffer *indptrBuffer = 0,
    flatbuffers::Offset<Int> indicesType = 0,
    const Buffer *indicesBuffer = 0) {
  SparseMatrixIndexCSRBuilder builder_(_fbb);
  builder_.add_indicesBuffer(indicesBuffer);
  builder_.add_indicesType(indicesType);
  builder_.add_indptrBuffer(indptrBuffer);
  builder_.add_indptrType(indptrType);
  return builder_.Finish();
}

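// --- Editor's usage sketch (not generated code) -----------------------------
// A minimal example of describing the CSR index of the 6x4 matrix documented
// above, assuming the indptr array (7 int64 values) and the indices array
// (9 int64 values) were written out-of-band; the buffer offsets and lengths
// below are illustrative only. The same Int table can be shared by both type
// fields.
//
//   flatbuffers::FlatBufferBuilder fbb;
//   auto indexType = CreateInt(fbb, /*bitWidth=*/64, /*is_signed=*/true);
//   Buffer indptrBuffer(/*offset=*/0, /*length=*/7 * 8);
//   Buffer indicesBuffer(/*offset=*/64, /*length=*/9 * 8);
//   auto csr = CreateSparseMatrixIndexCSR(
//       fbb, indexType, &indptrBuffer, indexType, &indicesBuffer);
// -----------------------------------------------------------------------------
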
struct SparseTensor FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
    VT_TYPE_TYPE = 4,
    VT_TYPE = 6,
    VT_SHAPE = 8,
    VT_NON_ZERO_LENGTH = 10,
    VT_SPARSEINDEX_TYPE = 12,
    VT_SPARSEINDEX = 14,
    VT_DATA = 16
  };
  Type type_type() const {
    return static_cast<Type>(GetField<uint8_t>(VT_TYPE_TYPE, 0));
  }
  /// The type of data contained in a value cell.
  /// Currently only fixed-width value types are supported,
  /// no strings or nested types.
  const void *type() const {
    return GetPointer<const void *>(VT_TYPE);
  }
  template<typename T> const T *type_as() const;
  const Null *type_as_Null() const {
    return type_type() == Type_Null ? static_cast<const Null *>(type()) : nullptr;
  }
  const Int *type_as_Int() const {
    return type_type() == Type_Int ? static_cast<const Int *>(type()) : nullptr;
  }
  const FloatingPoint *type_as_FloatingPoint() const {
    return type_type() == Type_FloatingPoint ? static_cast<const FloatingPoint *>(type()) : nullptr;
  }
  const Binary *type_as_Binary() const {
    return type_type() == Type_Binary ? static_cast<const Binary *>(type()) : nullptr;
  }
  const Utf8 *type_as_Utf8() const {
    return type_type() == Type_Utf8 ? static_cast<const Utf8 *>(type()) : nullptr;
  }
  const Bool *type_as_Bool() const {
    return type_type() == Type_Bool ? static_cast<const Bool *>(type()) : nullptr;
  }
  const Decimal *type_as_Decimal() const {
    return type_type() == Type_Decimal ? static_cast<const Decimal *>(type()) : nullptr;
  }
  const Date *type_as_Date() const {
    return type_type() == Type_Date ? static_cast<const Date *>(type()) : nullptr;
  }
  const Time *type_as_Time() const {
    return type_type() == Type_Time ? static_cast<const Time *>(type()) : nullptr;
  }
  const Timestamp *type_as_Timestamp() const {
    return type_type() == Type_Timestamp ? static_cast<const Timestamp *>(type()) : nullptr;
  }
  const Interval *type_as_Interval() const {
    return type_type() == Type_Interval ? static_cast<const Interval *>(type()) : nullptr;
  }
  const List *type_as_List() const {
    return type_type() == Type_List ? static_cast<const List *>(type()) : nullptr;
  }
  const Struct_ *type_as_Struct_() const {
    return type_type() == Type_Struct_ ? static_cast<const Struct_ *>(type()) : nullptr;
  }
  const Union *type_as_Union() const {
    return type_type() == Type_Union ? static_cast<const Union *>(type()) : nullptr;
  }
  const FixedSizeBinary *type_as_FixedSizeBinary() const {
    return type_type() == Type_FixedSizeBinary ? static_cast<const FixedSizeBinary *>(type()) : nullptr;
  }
  const FixedSizeList *type_as_FixedSizeList() const {
    return type_type() == Type_FixedSizeList ? static_cast<const FixedSizeList *>(type()) : nullptr;
  }
  const Map *type_as_Map() const {
    return type_type() == Type_Map ? static_cast<const Map *>(type()) : nullptr;
  }
  const Duration *type_as_Duration() const {
    return type_type() == Type_Duration ? static_cast<const Duration *>(type()) : nullptr;
  }
  const LargeBinary *type_as_LargeBinary() const {
    return type_type() == Type_LargeBinary ? static_cast<const LargeBinary *>(type()) : nullptr;
  }
  const LargeUtf8 *type_as_LargeUtf8() const {
    return type_type() == Type_LargeUtf8 ? static_cast<const LargeUtf8 *>(type()) : nullptr;
  }
  const LargeList *type_as_LargeList() const {
    return type_type() == Type_LargeList ? static_cast<const LargeList *>(type()) : nullptr;
  }
  /// The dimensions of the tensor, optionally named.
  const flatbuffers::Vector<flatbuffers::Offset<TensorDim>> *shape() const {
    return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<TensorDim>> *>(VT_SHAPE);
  }
  /// The number of non-zero values in a sparse tensor.
  int64_t non_zero_length() const {
    return GetField<int64_t>(VT_NON_ZERO_LENGTH, 0);
  }
  SparseTensorIndex sparseIndex_type() const {
    return static_cast<SparseTensorIndex>(GetField<uint8_t>(VT_SPARSEINDEX_TYPE, 0));
  }
  /// Sparse tensor index
  const void *sparseIndex() const {
    return GetPointer<const void *>(VT_SPARSEINDEX);
  }
  template<typename T> const T *sparseIndex_as() const;
  const SparseTensorIndexCOO *sparseIndex_as_SparseTensorIndexCOO() const {
    return sparseIndex_type() == SparseTensorIndex_SparseTensorIndexCOO ? static_cast<const SparseTensorIndexCOO *>(sparseIndex()) : nullptr;
  }
  const SparseMatrixIndexCSR *sparseIndex_as_SparseMatrixIndexCSR() const {
    return sparseIndex_type() == SparseTensorIndex_SparseMatrixIndexCSR ? static_cast<const SparseMatrixIndexCSR *>(sparseIndex()) : nullptr;
  }
  /// The location and size of the tensor's data
  const Buffer *data() const {
    return GetStruct<const Buffer *>(VT_DATA);
  }
  bool Verify(flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           VerifyField<uint8_t>(verifier, VT_TYPE_TYPE) &&
           VerifyOffset(verifier, VT_TYPE) &&
           VerifyType(verifier, type(), type_type()) &&
           VerifyOffset(verifier, VT_SHAPE) &&
           verifier.VerifyVector(shape()) &&
           verifier.VerifyVectorOfTables(shape()) &&
           VerifyField<int64_t>(verifier, VT_NON_ZERO_LENGTH) &&
           VerifyField<uint8_t>(verifier, VT_SPARSEINDEX_TYPE) &&
           VerifyOffset(verifier, VT_SPARSEINDEX) &&
           VerifySparseTensorIndex(verifier, sparseIndex(), sparseIndex_type()) &&
           VerifyField<Buffer>(verifier, VT_DATA) &&
           verifier.EndTable();
  }
};

template<> inline const Null *SparseTensor::type_as<Null>() const {
  return type_as_Null();
}

template<> inline const Int *SparseTensor::type_as<Int>() const {
  return type_as_Int();
}

template<> inline const FloatingPoint *SparseTensor::type_as<FloatingPoint>() const {
  return type_as_FloatingPoint();
}

template<> inline const Binary *SparseTensor::type_as<Binary>() const {
  return type_as_Binary();
}

template<> inline const Utf8 *SparseTensor::type_as<Utf8>() const {
  return type_as_Utf8();
}

template<> inline const Bool *SparseTensor::type_as<Bool>() const {
  return type_as_Bool();
}

template<> inline const Decimal *SparseTensor::type_as<Decimal>() const {
  return type_as_Decimal();
}

template<> inline const Date *SparseTensor::type_as<Date>() const {
  return type_as_Date();
}

template<> inline const Time *SparseTensor::type_as<Time>() const {
  return type_as_Time();
}

template<> inline const Timestamp *SparseTensor::type_as<Timestamp>() const {
  return type_as_Timestamp();
}

template<> inline const Interval *SparseTensor::type_as<Interval>() const {
  return type_as_Interval();
}

template<> inline const List *SparseTensor::type_as<List>() const {
  return type_as_List();
}

template<> inline const Struct_ *SparseTensor::type_as<Struct_>() const {
  return type_as_Struct_();
}

template<> inline const Union *SparseTensor::type_as<Union>() const {
  return type_as_Union();
}

template<> inline const FixedSizeBinary *SparseTensor::type_as<FixedSizeBinary>() const {
  return type_as_FixedSizeBinary();
}

template<> inline const FixedSizeList *SparseTensor::type_as<FixedSizeList>() const {
  return type_as_FixedSizeList();
}

template<> inline const Map *SparseTensor::type_as<Map>() const {
  return type_as_Map();
}

template<> inline const Duration *SparseTensor::type_as<Duration>() const {
  return type_as_Duration();
}

template<> inline const LargeBinary *SparseTensor::type_as<LargeBinary>() const {
  return type_as_LargeBinary();
}

template<> inline const LargeUtf8 *SparseTensor::type_as<LargeUtf8>() const {
  return type_as_LargeUtf8();
}

template<> inline const LargeList *SparseTensor::type_as<LargeList>() const {
  return type_as_LargeList();
}

template<> inline const SparseTensorIndexCOO *SparseTensor::sparseIndex_as<SparseTensorIndexCOO>() const {
  return sparseIndex_as_SparseTensorIndexCOO();
}

template<> inline const SparseMatrixIndexCSR *SparseTensor::sparseIndex_as<SparseMatrixIndexCSR>() const {
  return sparseIndex_as_SparseMatrixIndexCSR();
}

struct SparseTensorBuilder {
  flatbuffers::FlatBufferBuilder &fbb_;
  flatbuffers::uoffset_t start_;
  void add_type_type(Type type_type) {
    fbb_.AddElement<uint8_t>(SparseTensor::VT_TYPE_TYPE, static_cast<uint8_t>(type_type), 0);
  }
  void add_type(flatbuffers::Offset<void> type) {
    fbb_.AddOffset(SparseTensor::VT_TYPE, type);
  }
  void add_shape(flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<TensorDim>>> shape) {
    fbb_.AddOffset(SparseTensor::VT_SHAPE, shape);
  }
  void add_non_zero_length(int64_t non_zero_length) {
    fbb_.AddElement<int64_t>(SparseTensor::VT_NON_ZERO_LENGTH, non_zero_length, 0);
  }
  void add_sparseIndex_type(SparseTensorIndex sparseIndex_type) {
    fbb_.AddElement<uint8_t>(SparseTensor::VT_SPARSEINDEX_TYPE, static_cast<uint8_t>(sparseIndex_type), 0);
  }
  void add_sparseIndex(flatbuffers::Offset<void> sparseIndex) {
    fbb_.AddOffset(SparseTensor::VT_SPARSEINDEX, sparseIndex);
  }
  void add_data(const Buffer *data) {
    fbb_.AddStruct(SparseTensor::VT_DATA, data);
  }
  explicit SparseTensorBuilder(flatbuffers::FlatBufferBuilder &_fbb)
        : fbb_(_fbb) {
    start_ = fbb_.StartTable();
  }
  SparseTensorBuilder &operator=(const SparseTensorBuilder &);
  flatbuffers::Offset<SparseTensor> Finish() {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<SparseTensor>(end);
    return o;
  }
};

inline flatbuffers::Offset<SparseTensor> CreateSparseTensor(
    flatbuffers::FlatBufferBuilder &_fbb,
    Type type_type = Type_NONE,
    flatbuffers::Offset<void> type = 0,
    flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<TensorDim>>> shape = 0,
    int64_t non_zero_length = 0,
    SparseTensorIndex sparseIndex_type = SparseTensorIndex_NONE,
    flatbuffers::Offset<void> sparseIndex = 0,
    const Buffer *data = 0) {
  SparseTensorBuilder builder_(_fbb);
  builder_.add_non_zero_length(non_zero_length);
  builder_.add_data(data);
  builder_.add_sparseIndex(sparseIndex);
  builder_.add_shape(shape);
  builder_.add_type(type);
  builder_.add_sparseIndex_type(sparseIndex_type);
  builder_.add_type_type(type_type);
  return builder_.Finish();
}

inline flatbuffers::Offset<SparseTensor> CreateSparseTensorDirect(
    flatbuffers::FlatBufferBuilder &_fbb,
    Type type_type = Type_NONE,
    flatbuffers::Offset<void> type = 0,
    const std::vector<flatbuffers::Offset<TensorDim>> *shape = nullptr,
    int64_t non_zero_length = 0,
    SparseTensorIndex sparseIndex_type = SparseTensorIndex_NONE,
    flatbuffers::Offset<void> sparseIndex = 0,
    const Buffer *data = 0) {
  auto shape__ = shape ? _fbb.CreateVector<flatbuffers::Offset<TensorDim>>(*shape) : 0;
  return org::apache::arrow::flatbuf::CreateSparseTensor(
      _fbb,
      type_type,
      type,
      shape__,
      non_zero_length,
      sparseIndex_type,
      sparseIndex,
      data);
}

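// --- Editor's usage sketch (not generated code) -----------------------------
// A minimal end-to-end example: describe the 2x3x4x5 float32 sparse tensor
// from the COO documentation, with 6 non-zero values. `coo` is assumed to be
// an Offset<SparseTensorIndexCOO> built as in the earlier sketch, and the
// data buffer location is illustrative. The union tag passed for each union
// field must match the table stored in it.
//
//   flatbuffers::FlatBufferBuilder fbb;
//   auto valueType = CreateFloatingPoint(fbb, Precision_SINGLE);
//   std::vector<flatbuffers::Offset<TensorDim>> dims = {
//       CreateTensorDim(fbb, 2), CreateTensorDim(fbb, 3),
//       CreateTensorDim(fbb, 4), CreateTensorDim(fbb, 5)};
//   Buffer dataBuffer(/*offset=*/256, /*length=*/6 * 4);
//   auto st = CreateSparseTensorDirect(
//       fbb, Type_FloatingPoint, valueType.Union(), &dims,
//       /*non_zero_length=*/6, SparseTensorIndex_SparseTensorIndexCOO,
//       coo.Union(), &dataBuffer);
// -----------------------------------------------------------------------------
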
inline bool VerifySparseTensorIndex(flatbuffers::Verifier &verifier, const void *obj, SparseTensorIndex type) {
  switch (type) {
    case SparseTensorIndex_NONE: {
      return true;
    }
    case SparseTensorIndex_SparseTensorIndexCOO: {
      auto ptr = reinterpret_cast<const SparseTensorIndexCOO *>(obj);
      return verifier.VerifyTable(ptr);
    }
    case SparseTensorIndex_SparseMatrixIndexCSR: {
      auto ptr = reinterpret_cast<const SparseMatrixIndexCSR *>(obj);
      return verifier.VerifyTable(ptr);
    }
    default: return false;
  }
}

inline bool VerifySparseTensorIndexVector(flatbuffers::Verifier &verifier, const flatbuffers::Vector<flatbuffers::Offset<void>> *values, const flatbuffers::Vector<uint8_t> *types) {
  if (!values || !types) return !values && !types;
  if (values->size() != types->size()) return false;
  for (flatbuffers::uoffset_t i = 0; i < values->size(); ++i) {
    if (!VerifySparseTensorIndex(
        verifier, values->Get(i), types->GetEnum<SparseTensorIndex>(i))) {
      return false;
    }
  }
  return true;
}

inline const org::apache::arrow::flatbuf::SparseTensor *GetSparseTensor(const void *buf) {
  return flatbuffers::GetRoot<org::apache::arrow::flatbuf::SparseTensor>(buf);
}

inline const org::apache::arrow::flatbuf::SparseTensor *GetSizePrefixedSparseTensor(const void *buf) {
  return flatbuffers::GetSizePrefixedRoot<org::apache::arrow::flatbuf::SparseTensor>(buf);
}

inline bool VerifySparseTensorBuffer(
    flatbuffers::Verifier &verifier) {
  return verifier.VerifyBuffer<org::apache::arrow::flatbuf::SparseTensor>(nullptr);
}

inline bool VerifySizePrefixedSparseTensorBuffer(
    flatbuffers::Verifier &verifier) {
  return verifier.VerifySizePrefixedBuffer<org::apache::arrow::flatbuf::SparseTensor>(nullptr);
}

inline void FinishSparseTensorBuffer(
    flatbuffers::FlatBufferBuilder &fbb,
    flatbuffers::Offset<org::apache::arrow::flatbuf::SparseTensor> root) {
  fbb.Finish(root);
}

inline void FinishSizePrefixedSparseTensorBuffer(
    flatbuffers::FlatBufferBuilder &fbb,
    flatbuffers::Offset<org::apache::arrow::flatbuf::SparseTensor> root) {
  fbb.FinishSizePrefixed(root);
}

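// --- Editor's usage sketch (not generated code) -----------------------------
// A reader-side example, assuming `fbb` holds a buffer that was finished with
// FinishSparseTensorBuffer as in the sketch above:
//
//   flatbuffers::Verifier verifier(fbb.GetBufferPointer(), fbb.GetSize());
//   if (VerifySparseTensorBuffer(verifier)) {
//     auto *tensor = GetSparseTensor(fbb.GetBufferPointer());
//     if (auto *coo = tensor->sparseIndex_as<SparseTensorIndexCOO>()) {
//       int64_t nbytes = coo->indicesBuffer()->length();  // index data size
//     }
//   }
// -----------------------------------------------------------------------------
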
}  // namespace flatbuf
}  // namespace arrow
}  // namespace apache
}  // namespace org

#endif  // FLATBUFFERS_GENERATED_SPARSETENSOR_ORG_APACHE_ARROW_FLATBUF_H_