1 | // automatically generated by the FlatBuffers compiler, do not modify |
2 | |
3 | |
4 | #ifndef FLATBUFFERS_GENERATED_MESSAGE_ORG_APACHE_ARROW_FLATBUF_H_ |
5 | #define FLATBUFFERS_GENERATED_MESSAGE_ORG_APACHE_ARROW_FLATBUF_H_ |
6 | |
7 | #include "flatbuffers/flatbuffers.h" |
8 | |
9 | #include "Schema_generated.h" |
10 | #include "Tensor_generated.h" |
11 | |
12 | namespace org { |
13 | namespace apache { |
14 | namespace arrow { |
15 | namespace flatbuf { |
16 | |
17 | struct FieldNode; |
18 | |
19 | struct RecordBatch; |
20 | |
21 | struct DictionaryBatch; |
22 | |
23 | struct Message; |
24 | |
25 | /// ---------------------------------------------------------------------- |
26 | /// The root Message type |
27 | /// This union enables us to easily send different message types without |
28 | /// redundant storage, and in the future we can easily add new message types. |
29 | /// |
30 | /// Arrow implementations do not need to implement all of the message types, |
31 | /// which may include experimental metadata types. For maximum compatibility, |
32 | /// it is best to send data using RecordBatch |
enum MessageHeader {
  MessageHeader_NONE = 0,
  MessageHeader_Schema = 1,
  MessageHeader_DictionaryBatch = 2,
  MessageHeader_RecordBatch = 3,
  MessageHeader_Tensor = 4,
  MessageHeader_SparseTensor = 5,
  MessageHeader_MIN = MessageHeader_NONE,
  MessageHeader_MAX = MessageHeader_SparseTensor
};
43 | |
44 | inline const MessageHeader (&())[6] { |
45 | static const MessageHeader values[] = { |
46 | MessageHeader_NONE, |
47 | MessageHeader_Schema, |
48 | MessageHeader_DictionaryBatch, |
49 | MessageHeader_RecordBatch, |
50 | MessageHeader_Tensor, |
51 | MessageHeader_SparseTensor |
52 | }; |
53 | return values; |
54 | } |
55 | |
// Returns a nullptr-terminated array of enum value names, indexed by the
// numeric value of MessageHeader (0..5).
inline const char * const *EnumNamesMessageHeader() {
  static const char * const names[] = {
    "NONE",
    "Schema",
    "DictionaryBatch",
    "RecordBatch",
    "Tensor",
    "SparseTensor",
    nullptr
  };
  return names;
}
68 | |
69 | inline const char *(MessageHeader e) { |
70 | const size_t index = static_cast<int>(e); |
71 | return EnumNamesMessageHeader()[index]; |
72 | } |
73 | |
74 | template<typename T> struct { |
75 | static const MessageHeader = MessageHeader_NONE; |
76 | }; |
77 | |
78 | template<> struct <Schema> { |
79 | static const MessageHeader = MessageHeader_Schema; |
80 | }; |
81 | |
82 | template<> struct <DictionaryBatch> { |
83 | static const MessageHeader = MessageHeader_DictionaryBatch; |
84 | }; |
85 | |
86 | template<> struct <RecordBatch> { |
87 | static const MessageHeader = MessageHeader_RecordBatch; |
88 | }; |
89 | |
90 | template<> struct <Tensor> { |
91 | static const MessageHeader = MessageHeader_Tensor; |
92 | }; |
93 | |
94 | template<> struct <SparseTensor> { |
95 | static const MessageHeader = MessageHeader_SparseTensor; |
96 | }; |
97 | |
98 | bool VerifyMessageHeader(flatbuffers::Verifier &verifier, const void *obj, MessageHeader type); |
99 | bool VerifyMessageHeaderVector(flatbuffers::Verifier &verifier, const flatbuffers::Vector<flatbuffers::Offset<void>> *values, const flatbuffers::Vector<uint8_t> *types); |
100 | |
101 | /// ---------------------------------------------------------------------- |
102 | /// Data structures for describing a table row batch (a collection of |
103 | /// equal-length Arrow arrays) |
104 | /// Metadata about a field at some level of a nested type tree (but not |
105 | /// its children). |
106 | /// |
107 | /// For example, a List<Int16> with values [[1, 2, 3], null, [4], [5, 6], null] |
108 | /// would have {length: 5, null_count: 2} for its List node, and {length: 6, |
109 | /// null_count: 0} for its Int16 node, as separate FieldNode structs |
FLATBUFFERS_MANUALLY_ALIGNED_STRUCT(8) FieldNode FLATBUFFERS_FINAL_CLASS {
 private:
  // Both fields are stored in FlatBuffers wire (little-endian) byte order.
  int64_t length_;
  int64_t null_count_;

 public:
  // Zero-initializes the struct; valid because FieldNode is a fixed-layout
  // struct with only int64_t members.
  FieldNode() {
    memset(this, 0, sizeof(FieldNode));
  }
  // Converts both values to wire byte order on construction.
  FieldNode(int64_t _length, int64_t _null_count)
      : length_(flatbuffers::EndianScalar(_length)),
        null_count_(flatbuffers::EndianScalar(_null_count)) {
  }
  /// The number of value slots in the Arrow array at this level of a nested
  /// tree
  int64_t length() const {
    return flatbuffers::EndianScalar(length_);
  }
  /// The number of observed nulls. Fields with null_count == 0 may choose not
  /// to write their physical validity bitmap out as a materialized buffer,
  /// instead setting the length of the bitmap buffer to 0.
  int64_t null_count() const {
    return flatbuffers::EndianScalar(null_count_);
  }
};
FLATBUFFERS_STRUCT_END(FieldNode, 16);
136 | |
137 | /// A data header describing the shared memory layout of a "record" or "row" |
138 | /// batch. Some systems call this a "row batch" internally and others a "record |
139 | /// batch". |
/// A data header describing the shared memory layout of a "record" or "row"
/// batch. Some systems call this a "row batch" internally and others a "record
/// batch".
struct RecordBatch FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
  // vtable byte offsets of each field; part of the wire format, do not change.
  enum {
    VT_LENGTH = 4,
    VT_NODES = 6,
    VT_BUFFERS = 8
  };
  /// number of records / rows. The arrays in the batch should all have this
  /// length
  int64_t length() const {
    return GetField<int64_t>(VT_LENGTH, 0);  // defaults to 0 when absent
  }
  /// Nodes correspond to the pre-ordered flattened logical schema
  const flatbuffers::Vector<const FieldNode *> *nodes() const {
    return GetPointer<const flatbuffers::Vector<const FieldNode *> *>(VT_NODES);
  }
  /// Buffers correspond to the pre-ordered flattened buffer tree
  ///
  /// The number of buffers appended to this list depends on the schema. For
  /// example, most primitive arrays will have 2 buffers, 1 for the validity
  /// bitmap and 1 for the values. For struct arrays, there will only be a
  /// single buffer for the validity (nulls) bitmap
  const flatbuffers::Vector<const Buffer *> *buffers() const {
    return GetPointer<const flatbuffers::Vector<const Buffer *> *>(VT_BUFFERS);
  }
  // Structural verification of this table against the buffer bounds; used by
  // VerifyMessageHeader when the union holds a RecordBatch.
  bool Verify(flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           VerifyField<int64_t>(verifier, VT_LENGTH) &&
           VerifyOffset(verifier, VT_NODES) &&
           verifier.VerifyVector(nodes()) &&
           VerifyOffset(verifier, VT_BUFFERS) &&
           verifier.VerifyVector(buffers()) &&
           verifier.EndTable();
  }
};
174 | |
// Incremental builder for a RecordBatch table; typically used via
// CreateRecordBatch rather than directly.
struct RecordBatchBuilder {
  flatbuffers::FlatBufferBuilder &fbb_;  // builder that owns the buffer
  flatbuffers::uoffset_t start_;         // table start returned by StartTable()
  void add_length(int64_t length) {
    fbb_.AddElement<int64_t>(RecordBatch::VT_LENGTH, length, 0);
  }
  void add_nodes(flatbuffers::Offset<flatbuffers::Vector<const FieldNode *>> nodes) {
    fbb_.AddOffset(RecordBatch::VT_NODES, nodes);
  }
  void add_buffers(flatbuffers::Offset<flatbuffers::Vector<const Buffer *>> buffers) {
    fbb_.AddOffset(RecordBatch::VT_BUFFERS, buffers);
  }
  explicit RecordBatchBuilder(flatbuffers::FlatBufferBuilder &_fbb)
      : fbb_(_fbb) {
    start_ = fbb_.StartTable();
  }
  // Declared but not defined: disallows assignment (generated-code idiom).
  RecordBatchBuilder &operator=(const RecordBatchBuilder &);
  // Closes the table and returns its offset within the buffer.
  flatbuffers::Offset<RecordBatch> Finish() {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<RecordBatch>(end);
    return o;
  }
};
198 | |
// Convenience helper that builds a complete RecordBatch table in one call.
// Fields are added in the generator's chosen order (widest scalar first);
// do not reorder, so emitted buffers stay byte-identical.
inline flatbuffers::Offset<RecordBatch> CreateRecordBatch(
    flatbuffers::FlatBufferBuilder &_fbb,
    int64_t length = 0,
    flatbuffers::Offset<flatbuffers::Vector<const FieldNode *>> nodes = 0,
    flatbuffers::Offset<flatbuffers::Vector<const Buffer *>> buffers = 0) {
  RecordBatchBuilder builder_(_fbb);
  builder_.add_length(length);
  builder_.add_buffers(buffers);
  builder_.add_nodes(nodes);
  return builder_.Finish();
}
210 | |
// Like CreateRecordBatch, but serializes the struct vectors from std::vector
// inputs first; nullptr vectors become absent (offset 0) fields.
inline flatbuffers::Offset<RecordBatch> CreateRecordBatchDirect(
    flatbuffers::FlatBufferBuilder &_fbb,
    int64_t length = 0,
    const std::vector<FieldNode> *nodes = nullptr,
    const std::vector<Buffer> *buffers = nullptr) {
  return org::apache::arrow::flatbuf::CreateRecordBatch(
      _fbb,
      length,
      nodes ? _fbb.CreateVectorOfStructs<FieldNode>(*nodes) : 0,
      buffers ? _fbb.CreateVectorOfStructs<Buffer>(*buffers) : 0);
}
222 | |
223 | /// For sending dictionary encoding information. Any Field can be |
224 | /// dictionary-encoded, but in this case none of its children may be |
225 | /// dictionary-encoded. |
226 | /// There is one vector / column per dictionary, but that vector / column |
227 | /// may be spread across multiple dictionary batches by using the isDelta |
228 | /// flag |
struct DictionaryBatch FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
  // vtable byte offsets of each field; part of the wire format, do not change.
  enum {
    VT_ID = 4,
    VT_DATA = 6,
    VT_ISDELTA = 8
  };
  // Dictionary id this batch belongs to (links to the Schema's encoding).
  int64_t id() const {
    return GetField<int64_t>(VT_ID, 0);
  }
  // The dictionary values themselves, shipped as a regular RecordBatch.
  const RecordBatch *data() const {
    return GetPointer<const RecordBatch *>(VT_DATA);
  }
  /// If isDelta is true the values in the dictionary are to be appended to a
  /// dictionary with the indicated id
  bool isDelta() const {
    return GetField<uint8_t>(VT_ISDELTA, 0) != 0;  // bool stored as uint8_t
  }
  // Structural verification of this table, including the nested RecordBatch.
  bool Verify(flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           VerifyField<int64_t>(verifier, VT_ID) &&
           VerifyOffset(verifier, VT_DATA) &&
           verifier.VerifyTable(data()) &&
           VerifyField<uint8_t>(verifier, VT_ISDELTA) &&
           verifier.EndTable();
  }
};
255 | |
// Incremental builder for a DictionaryBatch table; typically used via
// CreateDictionaryBatch rather than directly.
struct DictionaryBatchBuilder {
  flatbuffers::FlatBufferBuilder &fbb_;  // builder that owns the buffer
  flatbuffers::uoffset_t start_;         // table start returned by StartTable()
  void add_id(int64_t id) {
    fbb_.AddElement<int64_t>(DictionaryBatch::VT_ID, id, 0);
  }
  void add_data(flatbuffers::Offset<RecordBatch> data) {
    fbb_.AddOffset(DictionaryBatch::VT_DATA, data);
  }
  void add_isDelta(bool isDelta) {
    // bool is serialized as uint8_t in the FlatBuffers wire format.
    fbb_.AddElement<uint8_t>(DictionaryBatch::VT_ISDELTA, static_cast<uint8_t>(isDelta), 0);
  }
  explicit DictionaryBatchBuilder(flatbuffers::FlatBufferBuilder &_fbb)
      : fbb_(_fbb) {
    start_ = fbb_.StartTable();
  }
  // Declared but not defined: disallows assignment (generated-code idiom).
  DictionaryBatchBuilder &operator=(const DictionaryBatchBuilder &);
  // Closes the table and returns its offset within the buffer.
  flatbuffers::Offset<DictionaryBatch> Finish() {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<DictionaryBatch>(end);
    return o;
  }
};
279 | |
// Convenience helper that builds a complete DictionaryBatch table in one
// call. Field-add order follows the generator; do not reorder.
inline flatbuffers::Offset<DictionaryBatch> CreateDictionaryBatch(
    flatbuffers::FlatBufferBuilder &_fbb,
    int64_t id = 0,
    flatbuffers::Offset<RecordBatch> data = 0,
    bool isDelta = false) {
  DictionaryBatchBuilder builder_(_fbb);
  builder_.add_id(id);
  builder_.add_data(data);
  builder_.add_isDelta(isDelta);
  return builder_.Finish();
}
291 | |
292 | struct Message FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { |
293 | enum { |
294 | VT_VERSION = 4, |
295 | = 6, |
296 | = 8, |
297 | VT_BODYLENGTH = 10 |
298 | }; |
299 | MetadataVersion version() const { |
300 | return static_cast<MetadataVersion>(GetField<int16_t>(VT_VERSION, 0)); |
301 | } |
302 | MessageHeader () const { |
303 | return static_cast<MessageHeader>(GetField<uint8_t>(VT_HEADER_TYPE, 0)); |
304 | } |
305 | const void *() const { |
306 | return GetPointer<const void *>(VT_HEADER); |
307 | } |
308 | template<typename T> const T *() const; |
309 | const Schema *() const { |
310 | return header_type() == MessageHeader_Schema ? static_cast<const Schema *>(header()) : nullptr; |
311 | } |
312 | const DictionaryBatch *() const { |
313 | return header_type() == MessageHeader_DictionaryBatch ? static_cast<const DictionaryBatch *>(header()) : nullptr; |
314 | } |
315 | const RecordBatch *() const { |
316 | return header_type() == MessageHeader_RecordBatch ? static_cast<const RecordBatch *>(header()) : nullptr; |
317 | } |
318 | const Tensor *() const { |
319 | return header_type() == MessageHeader_Tensor ? static_cast<const Tensor *>(header()) : nullptr; |
320 | } |
321 | const SparseTensor *() const { |
322 | return header_type() == MessageHeader_SparseTensor ? static_cast<const SparseTensor *>(header()) : nullptr; |
323 | } |
324 | int64_t bodyLength() const { |
325 | return GetField<int64_t>(VT_BODYLENGTH, 0); |
326 | } |
327 | bool Verify(flatbuffers::Verifier &verifier) const { |
328 | return VerifyTableStart(verifier) && |
329 | VerifyField<int16_t>(verifier, VT_VERSION) && |
330 | VerifyField<uint8_t>(verifier, VT_HEADER_TYPE) && |
331 | VerifyOffset(verifier, VT_HEADER) && |
332 | VerifyMessageHeader(verifier, header(), header_type()) && |
333 | VerifyField<int64_t>(verifier, VT_BODYLENGTH) && |
334 | verifier.EndTable(); |
335 | } |
336 | }; |
337 | |
338 | template<> inline const Schema *Message::<Schema>() const { |
339 | return header_as_Schema(); |
340 | } |
341 | |
342 | template<> inline const DictionaryBatch *Message::<DictionaryBatch>() const { |
343 | return header_as_DictionaryBatch(); |
344 | } |
345 | |
346 | template<> inline const RecordBatch *Message::<RecordBatch>() const { |
347 | return header_as_RecordBatch(); |
348 | } |
349 | |
350 | template<> inline const Tensor *Message::<Tensor>() const { |
351 | return header_as_Tensor(); |
352 | } |
353 | |
354 | template<> inline const SparseTensor *Message::<SparseTensor>() const { |
355 | return header_as_SparseTensor(); |
356 | } |
357 | |
358 | struct MessageBuilder { |
359 | flatbuffers::FlatBufferBuilder &fbb_; |
360 | flatbuffers::uoffset_t start_; |
361 | void add_version(MetadataVersion version) { |
362 | fbb_.AddElement<int16_t>(Message::VT_VERSION, static_cast<int16_t>(version), 0); |
363 | } |
364 | void (MessageHeader ) { |
365 | fbb_.AddElement<uint8_t>(Message::VT_HEADER_TYPE, static_cast<uint8_t>(header_type), 0); |
366 | } |
367 | void (flatbuffers::Offset<void> ) { |
368 | fbb_.AddOffset(Message::VT_HEADER, header); |
369 | } |
370 | void add_bodyLength(int64_t bodyLength) { |
371 | fbb_.AddElement<int64_t>(Message::VT_BODYLENGTH, bodyLength, 0); |
372 | } |
373 | explicit MessageBuilder(flatbuffers::FlatBufferBuilder &_fbb) |
374 | : fbb_(_fbb) { |
375 | start_ = fbb_.StartTable(); |
376 | } |
377 | MessageBuilder &operator=(const MessageBuilder &); |
378 | flatbuffers::Offset<Message> Finish() { |
379 | const auto end = fbb_.EndTable(start_); |
380 | auto o = flatbuffers::Offset<Message>(end); |
381 | return o; |
382 | } |
383 | }; |
384 | |
385 | inline flatbuffers::Offset<Message> ( |
386 | flatbuffers::FlatBufferBuilder &_fbb, |
387 | MetadataVersion version = MetadataVersion_V1, |
388 | MessageHeader = MessageHeader_NONE, |
389 | flatbuffers::Offset<void> = 0, |
390 | int64_t bodyLength = 0) { |
391 | MessageBuilder builder_(_fbb); |
392 | builder_.add_bodyLength(bodyLength); |
393 | builder_.add_header(header); |
394 | builder_.add_version(version); |
395 | builder_.add_header_type(header_type); |
396 | return builder_.Finish(); |
397 | } |
398 | |
399 | inline bool (flatbuffers::Verifier &verifier, const void *obj, MessageHeader type) { |
400 | switch (type) { |
401 | case MessageHeader_NONE: { |
402 | return true; |
403 | } |
404 | case MessageHeader_Schema: { |
405 | auto ptr = reinterpret_cast<const Schema *>(obj); |
406 | return verifier.VerifyTable(ptr); |
407 | } |
408 | case MessageHeader_DictionaryBatch: { |
409 | auto ptr = reinterpret_cast<const DictionaryBatch *>(obj); |
410 | return verifier.VerifyTable(ptr); |
411 | } |
412 | case MessageHeader_RecordBatch: { |
413 | auto ptr = reinterpret_cast<const RecordBatch *>(obj); |
414 | return verifier.VerifyTable(ptr); |
415 | } |
416 | case MessageHeader_Tensor: { |
417 | auto ptr = reinterpret_cast<const Tensor *>(obj); |
418 | return verifier.VerifyTable(ptr); |
419 | } |
420 | case MessageHeader_SparseTensor: { |
421 | auto ptr = reinterpret_cast<const SparseTensor *>(obj); |
422 | return verifier.VerifyTable(ptr); |
423 | } |
424 | default: return false; |
425 | } |
426 | } |
427 | |
428 | inline bool (flatbuffers::Verifier &verifier, const flatbuffers::Vector<flatbuffers::Offset<void>> *values, const flatbuffers::Vector<uint8_t> *types) { |
429 | if (!values || !types) return !values && !types; |
430 | if (values->size() != types->size()) return false; |
431 | for (flatbuffers::uoffset_t i = 0; i < values->size(); ++i) { |
432 | if (!VerifyMessageHeader( |
433 | verifier, values->Get(i), types->GetEnum<MessageHeader>(i))) { |
434 | return false; |
435 | } |
436 | } |
437 | return true; |
438 | } |
439 | |
// Returns the root Message of a finished (non-size-prefixed) buffer.
// Performs no verification; call VerifyMessageBuffer on untrusted input.
inline const org::apache::arrow::flatbuf::Message *GetMessage(const void *buf) {
  return flatbuffers::GetRoot<org::apache::arrow::flatbuf::Message>(buf);
}
443 | |
// Returns the root Message of a buffer that begins with a size prefix.
inline const org::apache::arrow::flatbuf::Message *GetSizePrefixedMessage(const void *buf) {
  return flatbuffers::GetSizePrefixedRoot<org::apache::arrow::flatbuf::Message>(buf);
}
447 | |
// Verifies the verifier's buffer as a Message root; nullptr means no
// file_identifier check is performed.
inline bool VerifyMessageBuffer(
    flatbuffers::Verifier &verifier) {
  return verifier.VerifyBuffer<org::apache::arrow::flatbuf::Message>(nullptr);
}
452 | |
// Same as VerifyMessageBuffer, but for a size-prefixed buffer.
inline bool VerifySizePrefixedMessageBuffer(
    flatbuffers::Verifier &verifier) {
  return verifier.VerifySizePrefixedBuffer<org::apache::arrow::flatbuf::Message>(nullptr);
}
457 | |
// Finalizes the buffer with the given Message as root (no size prefix).
inline void FinishMessageBuffer(
    flatbuffers::FlatBufferBuilder &fbb,
    flatbuffers::Offset<org::apache::arrow::flatbuf::Message> root) {
  fbb.Finish(root);
}
463 | |
// Finalizes the buffer with the given Message as root, prepending a size
// prefix.
inline void FinishSizePrefixedMessageBuffer(
    flatbuffers::FlatBufferBuilder &fbb,
    flatbuffers::Offset<org::apache::arrow::flatbuf::Message> root) {
  fbb.FinishSizePrefixed(root);
}
469 | |
470 | } // namespace flatbuf |
471 | } // namespace arrow |
472 | } // namespace apache |
473 | } // namespace org |
474 | |
475 | #endif // FLATBUFFERS_GENERATED_MESSAGE_ORG_APACHE_ARROW_FLATBUF_H_ |
476 | |