1 | // automatically generated by the FlatBuffers compiler, do not modify |
2 | |
3 | |
4 | #ifndef FLATBUFFERS_GENERATED_MESSAGE_ORG_APACHE_ARROW_FLATBUF_H_ |
5 | #define FLATBUFFERS_GENERATED_MESSAGE_ORG_APACHE_ARROW_FLATBUF_H_ |
6 | |
7 | #include "flatbuffers/flatbuffers.h" |
8 | |
9 | #include "Schema_generated.h" |
10 | #include "SparseTensor_generated.h" |
11 | #include "Tensor_generated.h" |
12 | |
13 | namespace org { |
14 | namespace apache { |
15 | namespace arrow { |
16 | namespace flatbuf { |
17 | |
18 | struct FieldNode; |
19 | |
20 | struct RecordBatch; |
21 | |
22 | struct DictionaryBatch; |
23 | |
24 | struct Message; |
25 | |
26 | /// ---------------------------------------------------------------------- |
27 | /// The root Message type |
28 | /// This union enables us to easily send different message types without |
29 | /// redundant storage, and in the future we can easily add new message types. |
30 | /// |
31 | /// Arrow implementations do not need to implement all of the message types, |
32 | /// which may include experimental metadata types. For maximum compatibility, |
33 | /// it is best to send data using RecordBatch |
/// Discriminant for the Message.header union; one enumerator per member
/// table plus NONE, with MIN/MAX aliases for range checks.
enum MessageHeader {
  MessageHeader_NONE = 0,
  MessageHeader_Schema = 1,
  MessageHeader_DictionaryBatch = 2,
  MessageHeader_RecordBatch = 3,
  MessageHeader_Tensor = 4,
  MessageHeader_SparseTensor = 5,
  MessageHeader_MIN = MessageHeader_NONE,
  MessageHeader_MAX = MessageHeader_SparseTensor
};
44 | |
45 | inline const MessageHeader (&())[6] { |
46 | static const MessageHeader values[] = { |
47 | MessageHeader_NONE, |
48 | MessageHeader_Schema, |
49 | MessageHeader_DictionaryBatch, |
50 | MessageHeader_RecordBatch, |
51 | MessageHeader_Tensor, |
52 | MessageHeader_SparseTensor |
53 | }; |
54 | return values; |
55 | } |
56 | |
/// Returns the nullptr-terminated table of MessageHeader enumerator names,
/// indexed by enum value.
inline const char * const *EnumNamesMessageHeader() {
  static const char * const names[] = {
    "NONE",
    "Schema",
    "DictionaryBatch",
    "RecordBatch",
    "Tensor",
    "SparseTensor",
    nullptr
  };
  return names;
}
69 | |
70 | inline const char *(MessageHeader e) { |
71 | if (e < MessageHeader_NONE || e > MessageHeader_SparseTensor) return "" ; |
72 | const size_t index = static_cast<size_t>(e); |
73 | return EnumNamesMessageHeader()[index]; |
74 | } |
75 | |
76 | template<typename T> struct { |
77 | static const MessageHeader = MessageHeader_NONE; |
78 | }; |
79 | |
80 | template<> struct <Schema> { |
81 | static const MessageHeader = MessageHeader_Schema; |
82 | }; |
83 | |
84 | template<> struct <DictionaryBatch> { |
85 | static const MessageHeader = MessageHeader_DictionaryBatch; |
86 | }; |
87 | |
88 | template<> struct <RecordBatch> { |
89 | static const MessageHeader = MessageHeader_RecordBatch; |
90 | }; |
91 | |
92 | template<> struct <Tensor> { |
93 | static const MessageHeader = MessageHeader_Tensor; |
94 | }; |
95 | |
96 | template<> struct <SparseTensor> { |
97 | static const MessageHeader = MessageHeader_SparseTensor; |
98 | }; |
99 | |
100 | bool VerifyMessageHeader(flatbuffers::Verifier &verifier, const void *obj, MessageHeader type); |
101 | bool VerifyMessageHeaderVector(flatbuffers::Verifier &verifier, const flatbuffers::Vector<flatbuffers::Offset<void>> *values, const flatbuffers::Vector<uint8_t> *types); |
102 | |
103 | /// ---------------------------------------------------------------------- |
104 | /// Data structures for describing a table row batch (a collection of |
105 | /// equal-length Arrow arrays) |
106 | /// Metadata about a field at some level of a nested type tree (but not |
107 | /// its children). |
108 | /// |
109 | /// For example, a List<Int16> with values [[1, 2, 3], null, [4], [5, 6], null] |
110 | /// would have {length: 5, null_count: 2} for its List node, and {length: 6, |
111 | /// null_count: 0} for its Int16 node, as separate FieldNode structs |
/// Fixed-size (16-byte, 8-byte-aligned) struct stored inline in vectors;
/// fields are kept in little-endian form via EndianScalar on both store
/// and load.
FLATBUFFERS_MANUALLY_ALIGNED_STRUCT(8) FieldNode FLATBUFFERS_FINAL_CLASS {
 private:
  int64_t length_;
  int64_t null_count_;

 public:
  FieldNode() {
    // Zero the whole struct so any padding bytes are deterministic.
    memset(static_cast<void *>(this), 0, sizeof(FieldNode));
  }
  FieldNode(int64_t _length, int64_t _null_count)
      : length_(flatbuffers::EndianScalar(_length)),
        null_count_(flatbuffers::EndianScalar(_null_count)) {
  }
  /// The number of value slots in the Arrow array at this level of a nested
  /// tree
  int64_t length() const {
    return flatbuffers::EndianScalar(length_);
  }
  /// The number of observed nulls. Fields with null_count == 0 may choose not
  /// to write their physical validity bitmap out as a materialized buffer,
  /// instead setting the length of the bitmap buffer to 0.
  int64_t null_count() const {
    return flatbuffers::EndianScalar(null_count_);
  }
};
FLATBUFFERS_STRUCT_END(FieldNode, 16);
138 | |
139 | /// A data header describing the shared memory layout of a "record" or "row" |
140 | /// batch. Some systems call this a "row batch" internally and others a "record |
141 | /// batch". |
/// Read-side accessor for the RecordBatch table. Accessors return defaults
/// (0 / null pointer) when a field is absent from the vtable.
struct RecordBatch FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
  // Byte offsets of each field slot within the vtable.
  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
    VT_LENGTH = 4,
    VT_NODES = 6,
    VT_BUFFERS = 8
  };
  /// number of records / rows. The arrays in the batch should all have this
  /// length
  int64_t length() const {
    return GetField<int64_t>(VT_LENGTH, 0);
  }
  /// Nodes correspond to the pre-ordered flattened logical schema
  const flatbuffers::Vector<const FieldNode *> *nodes() const {
    return GetPointer<const flatbuffers::Vector<const FieldNode *> *>(VT_NODES);
  }
  /// Buffers correspond to the pre-ordered flattened buffer tree
  ///
  /// The number of buffers appended to this list depends on the schema. For
  /// example, most primitive arrays will have 2 buffers, 1 for the validity
  /// bitmap and 1 for the values. For struct arrays, there will only be a
  /// single buffer for the validity (nulls) bitmap
  const flatbuffers::Vector<const Buffer *> *buffers() const {
    return GetPointer<const flatbuffers::Vector<const Buffer *> *>(VT_BUFFERS);
  }
  /// Structural verification of this table against the buffer bounds.
  bool Verify(flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           VerifyField<int64_t>(verifier, VT_LENGTH) &&
           VerifyOffset(verifier, VT_NODES) &&
           verifier.VerifyVector(nodes()) &&
           VerifyOffset(verifier, VT_BUFFERS) &&
           verifier.VerifyVector(buffers()) &&
           verifier.EndTable();
  }
};
176 | |
/// Incremental builder for a RecordBatch table; call add_* between
/// construction (StartTable) and Finish (EndTable).
struct RecordBatchBuilder {
  flatbuffers::FlatBufferBuilder &fbb_;
  flatbuffers::uoffset_t start_;  // offset of the table start in fbb_
  void add_length(int64_t length) {
    fbb_.AddElement<int64_t>(RecordBatch::VT_LENGTH, length, 0);
  }
  void add_nodes(flatbuffers::Offset<flatbuffers::Vector<const FieldNode *>> nodes) {
    fbb_.AddOffset(RecordBatch::VT_NODES, nodes);
  }
  void add_buffers(flatbuffers::Offset<flatbuffers::Vector<const Buffer *>> buffers) {
    fbb_.AddOffset(RecordBatch::VT_BUFFERS, buffers);
  }
  explicit RecordBatchBuilder(flatbuffers::FlatBufferBuilder &_fbb)
        : fbb_(_fbb) {
    start_ = fbb_.StartTable();
  }
  // Copy-assignment deliberately declared but not defined (non-assignable).
  RecordBatchBuilder &operator=(const RecordBatchBuilder &);
  flatbuffers::Offset<RecordBatch> Finish() {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<RecordBatch>(end);
    return o;
  }
};
200 | |
/// Convenience one-shot constructor for a RecordBatch table.
/// Fields are added largest-first (generator sorts by size for alignment).
inline flatbuffers::Offset<RecordBatch> CreateRecordBatch(
    flatbuffers::FlatBufferBuilder &_fbb,
    int64_t length = 0,
    flatbuffers::Offset<flatbuffers::Vector<const FieldNode *>> nodes = 0,
    flatbuffers::Offset<flatbuffers::Vector<const Buffer *>> buffers = 0) {
  RecordBatchBuilder builder_(_fbb);
  builder_.add_length(length);
  builder_.add_buffers(buffers);
  builder_.add_nodes(nodes);
  return builder_.Finish();
}
212 | |
/// Like CreateRecordBatch, but serializes std::vector inputs into the buffer
/// first (null vectors become absent fields).
inline flatbuffers::Offset<RecordBatch> CreateRecordBatchDirect(
    flatbuffers::FlatBufferBuilder &_fbb,
    int64_t length = 0,
    const std::vector<FieldNode> *nodes = nullptr,
    const std::vector<Buffer> *buffers = nullptr) {
  auto nodes__ = nodes ? _fbb.CreateVectorOfStructs<FieldNode>(*nodes) : 0;
  auto buffers__ = buffers ? _fbb.CreateVectorOfStructs<Buffer>(*buffers) : 0;
  return org::apache::arrow::flatbuf::CreateRecordBatch(
      _fbb,
      length,
      nodes__,
      buffers__);
}
226 | |
227 | /// For sending dictionary encoding information. Any Field can be |
228 | /// dictionary-encoded, but in this case none of its children may be |
229 | /// dictionary-encoded. |
230 | /// There is one vector / column per dictionary, but that vector / column |
231 | /// may be spread across multiple dictionary batches by using the isDelta |
232 | /// flag |
/// Read-side accessor for the DictionaryBatch table: a RecordBatch of
/// dictionary values tagged with the dictionary id it (re)defines.
struct DictionaryBatch FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
  // Byte offsets of each field slot within the vtable.
  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
    VT_ID = 4,
    VT_DATA = 6,
    VT_ISDELTA = 8
  };
  /// Dictionary id this batch belongs to (default 0).
  int64_t id() const {
    return GetField<int64_t>(VT_ID, 0);
  }
  /// The dictionary's values, as a nested RecordBatch table (may be null).
  const RecordBatch *data() const {
    return GetPointer<const RecordBatch *>(VT_DATA);
  }
  /// If isDelta is true the values in the dictionary are to be appended to a
  /// dictionary with the indicated id
  bool isDelta() const {
    // Stored as uint8_t; any nonzero value reads as true.
    return GetField<uint8_t>(VT_ISDELTA, 0) != 0;
  }
  /// Structural verification of this table against the buffer bounds.
  bool Verify(flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           VerifyField<int64_t>(verifier, VT_ID) &&
           VerifyOffset(verifier, VT_DATA) &&
           verifier.VerifyTable(data()) &&
           VerifyField<uint8_t>(verifier, VT_ISDELTA) &&
           verifier.EndTable();
  }
};
259 | |
/// Incremental builder for a DictionaryBatch table; call add_* between
/// construction (StartTable) and Finish (EndTable).
struct DictionaryBatchBuilder {
  flatbuffers::FlatBufferBuilder &fbb_;
  flatbuffers::uoffset_t start_;  // offset of the table start in fbb_
  void add_id(int64_t id) {
    fbb_.AddElement<int64_t>(DictionaryBatch::VT_ID, id, 0);
  }
  void add_data(flatbuffers::Offset<RecordBatch> data) {
    fbb_.AddOffset(DictionaryBatch::VT_DATA, data);
  }
  void add_isDelta(bool isDelta) {
    // bool is stored on the wire as uint8_t.
    fbb_.AddElement<uint8_t>(DictionaryBatch::VT_ISDELTA, static_cast<uint8_t>(isDelta), 0);
  }
  explicit DictionaryBatchBuilder(flatbuffers::FlatBufferBuilder &_fbb)
        : fbb_(_fbb) {
    start_ = fbb_.StartTable();
  }
  // Copy-assignment deliberately declared but not defined (non-assignable).
  DictionaryBatchBuilder &operator=(const DictionaryBatchBuilder &);
  flatbuffers::Offset<DictionaryBatch> Finish() {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<DictionaryBatch>(end);
    return o;
  }
};
283 | |
/// Convenience one-shot constructor for a DictionaryBatch table.
inline flatbuffers::Offset<DictionaryBatch> CreateDictionaryBatch(
    flatbuffers::FlatBufferBuilder &_fbb,
    int64_t id = 0,
    flatbuffers::Offset<RecordBatch> data = 0,
    bool isDelta = false) {
  DictionaryBatchBuilder builder_(_fbb);
  builder_.add_id(id);
  builder_.add_data(data);
  builder_.add_isDelta(isDelta);
  return builder_.Finish();
}
295 | |
296 | struct Message FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { |
297 | enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { |
298 | VT_VERSION = 4, |
299 | = 6, |
300 | = 8, |
301 | VT_BODYLENGTH = 10, |
302 | VT_CUSTOM_METADATA = 12 |
303 | }; |
304 | MetadataVersion version() const { |
305 | return static_cast<MetadataVersion>(GetField<int16_t>(VT_VERSION, 0)); |
306 | } |
307 | MessageHeader () const { |
308 | return static_cast<MessageHeader>(GetField<uint8_t>(VT_HEADER_TYPE, 0)); |
309 | } |
310 | const void *() const { |
311 | return GetPointer<const void *>(VT_HEADER); |
312 | } |
313 | template<typename T> const T *() const; |
314 | const Schema *() const { |
315 | return header_type() == MessageHeader_Schema ? static_cast<const Schema *>(header()) : nullptr; |
316 | } |
317 | const DictionaryBatch *() const { |
318 | return header_type() == MessageHeader_DictionaryBatch ? static_cast<const DictionaryBatch *>(header()) : nullptr; |
319 | } |
320 | const RecordBatch *() const { |
321 | return header_type() == MessageHeader_RecordBatch ? static_cast<const RecordBatch *>(header()) : nullptr; |
322 | } |
323 | const Tensor *() const { |
324 | return header_type() == MessageHeader_Tensor ? static_cast<const Tensor *>(header()) : nullptr; |
325 | } |
326 | const SparseTensor *() const { |
327 | return header_type() == MessageHeader_SparseTensor ? static_cast<const SparseTensor *>(header()) : nullptr; |
328 | } |
329 | int64_t bodyLength() const { |
330 | return GetField<int64_t>(VT_BODYLENGTH, 0); |
331 | } |
332 | const flatbuffers::Vector<flatbuffers::Offset<KeyValue>> *custom_metadata() const { |
333 | return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<KeyValue>> *>(VT_CUSTOM_METADATA); |
334 | } |
335 | bool Verify(flatbuffers::Verifier &verifier) const { |
336 | return VerifyTableStart(verifier) && |
337 | VerifyField<int16_t>(verifier, VT_VERSION) && |
338 | VerifyField<uint8_t>(verifier, VT_HEADER_TYPE) && |
339 | VerifyOffset(verifier, VT_HEADER) && |
340 | VerifyMessageHeader(verifier, header(), header_type()) && |
341 | VerifyField<int64_t>(verifier, VT_BODYLENGTH) && |
342 | VerifyOffset(verifier, VT_CUSTOM_METADATA) && |
343 | verifier.VerifyVector(custom_metadata()) && |
344 | verifier.VerifyVectorOfTables(custom_metadata()) && |
345 | verifier.EndTable(); |
346 | } |
347 | }; |
348 | |
349 | template<> inline const Schema *Message::<Schema>() const { |
350 | return header_as_Schema(); |
351 | } |
352 | |
353 | template<> inline const DictionaryBatch *Message::<DictionaryBatch>() const { |
354 | return header_as_DictionaryBatch(); |
355 | } |
356 | |
357 | template<> inline const RecordBatch *Message::<RecordBatch>() const { |
358 | return header_as_RecordBatch(); |
359 | } |
360 | |
361 | template<> inline const Tensor *Message::<Tensor>() const { |
362 | return header_as_Tensor(); |
363 | } |
364 | |
365 | template<> inline const SparseTensor *Message::<SparseTensor>() const { |
366 | return header_as_SparseTensor(); |
367 | } |
368 | |
369 | struct MessageBuilder { |
370 | flatbuffers::FlatBufferBuilder &fbb_; |
371 | flatbuffers::uoffset_t start_; |
372 | void add_version(MetadataVersion version) { |
373 | fbb_.AddElement<int16_t>(Message::VT_VERSION, static_cast<int16_t>(version), 0); |
374 | } |
375 | void (MessageHeader ) { |
376 | fbb_.AddElement<uint8_t>(Message::VT_HEADER_TYPE, static_cast<uint8_t>(header_type), 0); |
377 | } |
378 | void (flatbuffers::Offset<void> ) { |
379 | fbb_.AddOffset(Message::VT_HEADER, header); |
380 | } |
381 | void add_bodyLength(int64_t bodyLength) { |
382 | fbb_.AddElement<int64_t>(Message::VT_BODYLENGTH, bodyLength, 0); |
383 | } |
384 | void add_custom_metadata(flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<KeyValue>>> custom_metadata) { |
385 | fbb_.AddOffset(Message::VT_CUSTOM_METADATA, custom_metadata); |
386 | } |
387 | explicit MessageBuilder(flatbuffers::FlatBufferBuilder &_fbb) |
388 | : fbb_(_fbb) { |
389 | start_ = fbb_.StartTable(); |
390 | } |
391 | MessageBuilder &operator=(const MessageBuilder &); |
392 | flatbuffers::Offset<Message> Finish() { |
393 | const auto end = fbb_.EndTable(start_); |
394 | auto o = flatbuffers::Offset<Message>(end); |
395 | return o; |
396 | } |
397 | }; |
398 | |
399 | inline flatbuffers::Offset<Message> ( |
400 | flatbuffers::FlatBufferBuilder &_fbb, |
401 | MetadataVersion version = MetadataVersion_V1, |
402 | MessageHeader = MessageHeader_NONE, |
403 | flatbuffers::Offset<void> = 0, |
404 | int64_t bodyLength = 0, |
405 | flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<KeyValue>>> custom_metadata = 0) { |
406 | MessageBuilder builder_(_fbb); |
407 | builder_.add_bodyLength(bodyLength); |
408 | builder_.add_custom_metadata(custom_metadata); |
409 | builder_.add_header(header); |
410 | builder_.add_version(version); |
411 | builder_.add_header_type(header_type); |
412 | return builder_.Finish(); |
413 | } |
414 | |
415 | inline flatbuffers::Offset<Message> ( |
416 | flatbuffers::FlatBufferBuilder &_fbb, |
417 | MetadataVersion version = MetadataVersion_V1, |
418 | MessageHeader = MessageHeader_NONE, |
419 | flatbuffers::Offset<void> = 0, |
420 | int64_t bodyLength = 0, |
421 | const std::vector<flatbuffers::Offset<KeyValue>> *custom_metadata = nullptr) { |
422 | auto custom_metadata__ = custom_metadata ? _fbb.CreateVector<flatbuffers::Offset<KeyValue>>(*custom_metadata) : 0; |
423 | return org::apache::arrow::flatbuf::CreateMessage( |
424 | _fbb, |
425 | version, |
426 | header_type, |
427 | header, |
428 | bodyLength, |
429 | custom_metadata__); |
430 | } |
431 | |
432 | inline bool (flatbuffers::Verifier &verifier, const void *obj, MessageHeader type) { |
433 | switch (type) { |
434 | case MessageHeader_NONE: { |
435 | return true; |
436 | } |
437 | case MessageHeader_Schema: { |
438 | auto ptr = reinterpret_cast<const Schema *>(obj); |
439 | return verifier.VerifyTable(ptr); |
440 | } |
441 | case MessageHeader_DictionaryBatch: { |
442 | auto ptr = reinterpret_cast<const DictionaryBatch *>(obj); |
443 | return verifier.VerifyTable(ptr); |
444 | } |
445 | case MessageHeader_RecordBatch: { |
446 | auto ptr = reinterpret_cast<const RecordBatch *>(obj); |
447 | return verifier.VerifyTable(ptr); |
448 | } |
449 | case MessageHeader_Tensor: { |
450 | auto ptr = reinterpret_cast<const Tensor *>(obj); |
451 | return verifier.VerifyTable(ptr); |
452 | } |
453 | case MessageHeader_SparseTensor: { |
454 | auto ptr = reinterpret_cast<const SparseTensor *>(obj); |
455 | return verifier.VerifyTable(ptr); |
456 | } |
457 | default: return false; |
458 | } |
459 | } |
460 | |
461 | inline bool (flatbuffers::Verifier &verifier, const flatbuffers::Vector<flatbuffers::Offset<void>> *values, const flatbuffers::Vector<uint8_t> *types) { |
462 | if (!values || !types) return !values && !types; |
463 | if (values->size() != types->size()) return false; |
464 | for (flatbuffers::uoffset_t i = 0; i < values->size(); ++i) { |
465 | if (!VerifyMessageHeader( |
466 | verifier, values->Get(i), types->GetEnum<MessageHeader>(i))) { |
467 | return false; |
468 | } |
469 | } |
470 | return true; |
471 | } |
472 | |
/// Returns the root Message of a finished FlatBuffer (no verification).
inline const org::apache::arrow::flatbuf::Message *GetMessage(const void *buf) {
  return flatbuffers::GetRoot<org::apache::arrow::flatbuf::Message>(buf);
}
476 | |
/// Returns the root Message of a size-prefixed FlatBuffer (no verification).
inline const org::apache::arrow::flatbuf::Message *GetSizePrefixedMessage(const void *buf) {
  return flatbuffers::GetSizePrefixedRoot<org::apache::arrow::flatbuf::Message>(buf);
}
480 | |
/// Verifies an entire buffer whose root is a Message table
/// (nullptr = no file identifier check).
inline bool VerifyMessageBuffer(
    flatbuffers::Verifier &verifier) {
  return verifier.VerifyBuffer<org::apache::arrow::flatbuf::Message>(nullptr);
}
485 | |
/// Verifies a size-prefixed buffer whose root is a Message table
/// (nullptr = no file identifier check).
inline bool VerifySizePrefixedMessageBuffer(
    flatbuffers::Verifier &verifier) {
  return verifier.VerifySizePrefixedBuffer<org::apache::arrow::flatbuf::Message>(nullptr);
}
490 | |
/// Finishes the builder with the given Message as the buffer root.
inline void FinishMessageBuffer(
    flatbuffers::FlatBufferBuilder &fbb,
    flatbuffers::Offset<org::apache::arrow::flatbuf::Message> root) {
  fbb.Finish(root);
}
496 | |
/// Finishes the builder with the given Message as root, prepending the
/// total buffer size (for streaming/framed transports).
inline void FinishSizePrefixedMessageBuffer(
    flatbuffers::FlatBufferBuilder &fbb,
    flatbuffers::Offset<org::apache::arrow::flatbuf::Message> root) {
  fbb.FinishSizePrefixed(root);
}
502 | |
503 | } // namespace flatbuf |
504 | } // namespace arrow |
505 | } // namespace apache |
506 | } // namespace org |
507 | |
508 | #endif // FLATBUFFERS_GENERATED_MESSAGE_ORG_APACHE_ARROW_FLATBUF_H_ |
509 | |