1 | // automatically generated by the FlatBuffers compiler, do not modify |
2 | |
3 | |
4 | #ifndef FLATBUFFERS_GENERATED_TENSOR_ORG_APACHE_ARROW_FLATBUF_H_ |
5 | #define FLATBUFFERS_GENERATED_TENSOR_ORG_APACHE_ARROW_FLATBUF_H_ |
6 | |
7 | #include "flatbuffers/flatbuffers.h" |
8 | |
9 | #include "Schema_generated.h" |
10 | |
11 | namespace org { |
12 | namespace apache { |
13 | namespace arrow { |
14 | namespace flatbuf { |
15 | |
16 | struct TensorDim; |
17 | |
18 | struct Tensor; |
19 | |
20 | /// ---------------------------------------------------------------------- |
21 | /// Data structures for dense tensors |
22 | /// Shape data for a single axis in a tensor |
23 | struct TensorDim FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { |
24 | enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { |
25 | VT_SIZE = 4, |
26 | VT_NAME = 6 |
27 | }; |
28 | /// Length of dimension |
29 | int64_t size() const { |
30 | return GetField<int64_t>(VT_SIZE, 0); |
31 | } |
32 | /// Name of the dimension, optional |
33 | const flatbuffers::String *name() const { |
34 | return GetPointer<const flatbuffers::String *>(VT_NAME); |
35 | } |
36 | bool Verify(flatbuffers::Verifier &verifier) const { |
37 | return VerifyTableStart(verifier) && |
38 | VerifyField<int64_t>(verifier, VT_SIZE) && |
39 | VerifyOffset(verifier, VT_NAME) && |
40 | verifier.VerifyString(name()) && |
41 | verifier.EndTable(); |
42 | } |
43 | }; |
44 | |
45 | struct TensorDimBuilder { |
46 | flatbuffers::FlatBufferBuilder &fbb_; |
47 | flatbuffers::uoffset_t start_; |
48 | void add_size(int64_t size) { |
49 | fbb_.AddElement<int64_t>(TensorDim::VT_SIZE, size, 0); |
50 | } |
51 | void add_name(flatbuffers::Offset<flatbuffers::String> name) { |
52 | fbb_.AddOffset(TensorDim::VT_NAME, name); |
53 | } |
54 | explicit TensorDimBuilder(flatbuffers::FlatBufferBuilder &_fbb) |
55 | : fbb_(_fbb) { |
56 | start_ = fbb_.StartTable(); |
57 | } |
58 | TensorDimBuilder &operator=(const TensorDimBuilder &); |
59 | flatbuffers::Offset<TensorDim> Finish() { |
60 | const auto end = fbb_.EndTable(start_); |
61 | auto o = flatbuffers::Offset<TensorDim>(end); |
62 | return o; |
63 | } |
64 | }; |
65 | |
66 | inline flatbuffers::Offset<TensorDim> CreateTensorDim( |
67 | flatbuffers::FlatBufferBuilder &_fbb, |
68 | int64_t size = 0, |
69 | flatbuffers::Offset<flatbuffers::String> name = 0) { |
70 | TensorDimBuilder builder_(_fbb); |
71 | builder_.add_size(size); |
72 | builder_.add_name(name); |
73 | return builder_.Finish(); |
74 | } |
75 | |
76 | inline flatbuffers::Offset<TensorDim> CreateTensorDimDirect( |
77 | flatbuffers::FlatBufferBuilder &_fbb, |
78 | int64_t size = 0, |
79 | const char *name = nullptr) { |
80 | auto name__ = name ? _fbb.CreateString(name) : 0; |
81 | return org::apache::arrow::flatbuf::CreateTensorDim( |
82 | _fbb, |
83 | size, |
84 | name__); |
85 | } |
86 | |
87 | struct Tensor FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { |
88 | enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { |
89 | VT_TYPE_TYPE = 4, |
90 | VT_TYPE = 6, |
91 | VT_SHAPE = 8, |
92 | VT_STRIDES = 10, |
93 | VT_DATA = 12 |
94 | }; |
95 | Type type_type() const { |
96 | return static_cast<Type>(GetField<uint8_t>(VT_TYPE_TYPE, 0)); |
97 | } |
98 | /// The type of data contained in a value cell. Currently only fixed-width |
99 | /// value types are supported, no strings or nested types |
100 | const void *type() const { |
101 | return GetPointer<const void *>(VT_TYPE); |
102 | } |
103 | template<typename T> const T *type_as() const; |
104 | const Null *type_as_Null() const { |
105 | return type_type() == Type_Null ? static_cast<const Null *>(type()) : nullptr; |
106 | } |
107 | const Int *type_as_Int() const { |
108 | return type_type() == Type_Int ? static_cast<const Int *>(type()) : nullptr; |
109 | } |
110 | const FloatingPoint *type_as_FloatingPoint() const { |
111 | return type_type() == Type_FloatingPoint ? static_cast<const FloatingPoint *>(type()) : nullptr; |
112 | } |
113 | const Binary *type_as_Binary() const { |
114 | return type_type() == Type_Binary ? static_cast<const Binary *>(type()) : nullptr; |
115 | } |
116 | const Utf8 *type_as_Utf8() const { |
117 | return type_type() == Type_Utf8 ? static_cast<const Utf8 *>(type()) : nullptr; |
118 | } |
119 | const Bool *type_as_Bool() const { |
120 | return type_type() == Type_Bool ? static_cast<const Bool *>(type()) : nullptr; |
121 | } |
122 | const Decimal *type_as_Decimal() const { |
123 | return type_type() == Type_Decimal ? static_cast<const Decimal *>(type()) : nullptr; |
124 | } |
125 | const Date *type_as_Date() const { |
126 | return type_type() == Type_Date ? static_cast<const Date *>(type()) : nullptr; |
127 | } |
128 | const Time *type_as_Time() const { |
129 | return type_type() == Type_Time ? static_cast<const Time *>(type()) : nullptr; |
130 | } |
131 | const Timestamp *type_as_Timestamp() const { |
132 | return type_type() == Type_Timestamp ? static_cast<const Timestamp *>(type()) : nullptr; |
133 | } |
134 | const Interval *type_as_Interval() const { |
135 | return type_type() == Type_Interval ? static_cast<const Interval *>(type()) : nullptr; |
136 | } |
137 | const List *type_as_List() const { |
138 | return type_type() == Type_List ? static_cast<const List *>(type()) : nullptr; |
139 | } |
140 | const Struct_ *type_as_Struct_() const { |
141 | return type_type() == Type_Struct_ ? static_cast<const Struct_ *>(type()) : nullptr; |
142 | } |
143 | const Union *type_as_Union() const { |
144 | return type_type() == Type_Union ? static_cast<const Union *>(type()) : nullptr; |
145 | } |
146 | const FixedSizeBinary *type_as_FixedSizeBinary() const { |
147 | return type_type() == Type_FixedSizeBinary ? static_cast<const FixedSizeBinary *>(type()) : nullptr; |
148 | } |
149 | const FixedSizeList *type_as_FixedSizeList() const { |
150 | return type_type() == Type_FixedSizeList ? static_cast<const FixedSizeList *>(type()) : nullptr; |
151 | } |
152 | const Map *type_as_Map() const { |
153 | return type_type() == Type_Map ? static_cast<const Map *>(type()) : nullptr; |
154 | } |
155 | const Duration *type_as_Duration() const { |
156 | return type_type() == Type_Duration ? static_cast<const Duration *>(type()) : nullptr; |
157 | } |
158 | const LargeBinary *type_as_LargeBinary() const { |
159 | return type_type() == Type_LargeBinary ? static_cast<const LargeBinary *>(type()) : nullptr; |
160 | } |
161 | const LargeUtf8 *type_as_LargeUtf8() const { |
162 | return type_type() == Type_LargeUtf8 ? static_cast<const LargeUtf8 *>(type()) : nullptr; |
163 | } |
164 | const LargeList *type_as_LargeList() const { |
165 | return type_type() == Type_LargeList ? static_cast<const LargeList *>(type()) : nullptr; |
166 | } |
167 | /// The dimensions of the tensor, optionally named |
168 | const flatbuffers::Vector<flatbuffers::Offset<TensorDim>> *shape() const { |
169 | return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<TensorDim>> *>(VT_SHAPE); |
170 | } |
171 | /// Non-negative byte offsets to advance one value cell along each dimension |
172 | const flatbuffers::Vector<int64_t> *strides() const { |
173 | return GetPointer<const flatbuffers::Vector<int64_t> *>(VT_STRIDES); |
174 | } |
175 | /// The location and size of the tensor's data |
176 | const Buffer *data() const { |
177 | return GetStruct<const Buffer *>(VT_DATA); |
178 | } |
179 | bool Verify(flatbuffers::Verifier &verifier) const { |
180 | return VerifyTableStart(verifier) && |
181 | VerifyField<uint8_t>(verifier, VT_TYPE_TYPE) && |
182 | VerifyOffset(verifier, VT_TYPE) && |
183 | VerifyType(verifier, type(), type_type()) && |
184 | VerifyOffset(verifier, VT_SHAPE) && |
185 | verifier.VerifyVector(shape()) && |
186 | verifier.VerifyVectorOfTables(shape()) && |
187 | VerifyOffset(verifier, VT_STRIDES) && |
188 | verifier.VerifyVector(strides()) && |
189 | VerifyField<Buffer>(verifier, VT_DATA) && |
190 | verifier.EndTable(); |
191 | } |
192 | }; |
193 | |
194 | template<> inline const Null *Tensor::type_as<Null>() const { |
195 | return type_as_Null(); |
196 | } |
197 | |
198 | template<> inline const Int *Tensor::type_as<Int>() const { |
199 | return type_as_Int(); |
200 | } |
201 | |
202 | template<> inline const FloatingPoint *Tensor::type_as<FloatingPoint>() const { |
203 | return type_as_FloatingPoint(); |
204 | } |
205 | |
206 | template<> inline const Binary *Tensor::type_as<Binary>() const { |
207 | return type_as_Binary(); |
208 | } |
209 | |
210 | template<> inline const Utf8 *Tensor::type_as<Utf8>() const { |
211 | return type_as_Utf8(); |
212 | } |
213 | |
214 | template<> inline const Bool *Tensor::type_as<Bool>() const { |
215 | return type_as_Bool(); |
216 | } |
217 | |
218 | template<> inline const Decimal *Tensor::type_as<Decimal>() const { |
219 | return type_as_Decimal(); |
220 | } |
221 | |
222 | template<> inline const Date *Tensor::type_as<Date>() const { |
223 | return type_as_Date(); |
224 | } |
225 | |
226 | template<> inline const Time *Tensor::type_as<Time>() const { |
227 | return type_as_Time(); |
228 | } |
229 | |
230 | template<> inline const Timestamp *Tensor::type_as<Timestamp>() const { |
231 | return type_as_Timestamp(); |
232 | } |
233 | |
234 | template<> inline const Interval *Tensor::type_as<Interval>() const { |
235 | return type_as_Interval(); |
236 | } |
237 | |
238 | template<> inline const List *Tensor::type_as<List>() const { |
239 | return type_as_List(); |
240 | } |
241 | |
242 | template<> inline const Struct_ *Tensor::type_as<Struct_>() const { |
243 | return type_as_Struct_(); |
244 | } |
245 | |
246 | template<> inline const Union *Tensor::type_as<Union>() const { |
247 | return type_as_Union(); |
248 | } |
249 | |
250 | template<> inline const FixedSizeBinary *Tensor::type_as<FixedSizeBinary>() const { |
251 | return type_as_FixedSizeBinary(); |
252 | } |
253 | |
254 | template<> inline const FixedSizeList *Tensor::type_as<FixedSizeList>() const { |
255 | return type_as_FixedSizeList(); |
256 | } |
257 | |
258 | template<> inline const Map *Tensor::type_as<Map>() const { |
259 | return type_as_Map(); |
260 | } |
261 | |
262 | template<> inline const Duration *Tensor::type_as<Duration>() const { |
263 | return type_as_Duration(); |
264 | } |
265 | |
266 | template<> inline const LargeBinary *Tensor::type_as<LargeBinary>() const { |
267 | return type_as_LargeBinary(); |
268 | } |
269 | |
270 | template<> inline const LargeUtf8 *Tensor::type_as<LargeUtf8>() const { |
271 | return type_as_LargeUtf8(); |
272 | } |
273 | |
274 | template<> inline const LargeList *Tensor::type_as<LargeList>() const { |
275 | return type_as_LargeList(); |
276 | } |
277 | |
278 | struct TensorBuilder { |
279 | flatbuffers::FlatBufferBuilder &fbb_; |
280 | flatbuffers::uoffset_t start_; |
281 | void add_type_type(Type type_type) { |
282 | fbb_.AddElement<uint8_t>(Tensor::VT_TYPE_TYPE, static_cast<uint8_t>(type_type), 0); |
283 | } |
284 | void add_type(flatbuffers::Offset<void> type) { |
285 | fbb_.AddOffset(Tensor::VT_TYPE, type); |
286 | } |
287 | void add_shape(flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<TensorDim>>> shape) { |
288 | fbb_.AddOffset(Tensor::VT_SHAPE, shape); |
289 | } |
290 | void add_strides(flatbuffers::Offset<flatbuffers::Vector<int64_t>> strides) { |
291 | fbb_.AddOffset(Tensor::VT_STRIDES, strides); |
292 | } |
293 | void add_data(const Buffer *data) { |
294 | fbb_.AddStruct(Tensor::VT_DATA, data); |
295 | } |
296 | explicit TensorBuilder(flatbuffers::FlatBufferBuilder &_fbb) |
297 | : fbb_(_fbb) { |
298 | start_ = fbb_.StartTable(); |
299 | } |
300 | TensorBuilder &operator=(const TensorBuilder &); |
301 | flatbuffers::Offset<Tensor> Finish() { |
302 | const auto end = fbb_.EndTable(start_); |
303 | auto o = flatbuffers::Offset<Tensor>(end); |
304 | return o; |
305 | } |
306 | }; |
307 | |
308 | inline flatbuffers::Offset<Tensor> CreateTensor( |
309 | flatbuffers::FlatBufferBuilder &_fbb, |
310 | Type type_type = Type_NONE, |
311 | flatbuffers::Offset<void> type = 0, |
312 | flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<TensorDim>>> shape = 0, |
313 | flatbuffers::Offset<flatbuffers::Vector<int64_t>> strides = 0, |
314 | const Buffer *data = 0) { |
315 | TensorBuilder builder_(_fbb); |
316 | builder_.add_data(data); |
317 | builder_.add_strides(strides); |
318 | builder_.add_shape(shape); |
319 | builder_.add_type(type); |
320 | builder_.add_type_type(type_type); |
321 | return builder_.Finish(); |
322 | } |
323 | |
324 | inline flatbuffers::Offset<Tensor> CreateTensorDirect( |
325 | flatbuffers::FlatBufferBuilder &_fbb, |
326 | Type type_type = Type_NONE, |
327 | flatbuffers::Offset<void> type = 0, |
328 | const std::vector<flatbuffers::Offset<TensorDim>> *shape = nullptr, |
329 | const std::vector<int64_t> *strides = nullptr, |
330 | const Buffer *data = 0) { |
331 | auto shape__ = shape ? _fbb.CreateVector<flatbuffers::Offset<TensorDim>>(*shape) : 0; |
332 | auto strides__ = strides ? _fbb.CreateVector<int64_t>(*strides) : 0; |
333 | return org::apache::arrow::flatbuf::CreateTensor( |
334 | _fbb, |
335 | type_type, |
336 | type, |
337 | shape__, |
338 | strides__, |
339 | data); |
340 | } |
341 | |
342 | inline const org::apache::arrow::flatbuf::Tensor *GetTensor(const void *buf) { |
343 | return flatbuffers::GetRoot<org::apache::arrow::flatbuf::Tensor>(buf); |
344 | } |
345 | |
346 | inline const org::apache::arrow::flatbuf::Tensor *GetSizePrefixedTensor(const void *buf) { |
347 | return flatbuffers::GetSizePrefixedRoot<org::apache::arrow::flatbuf::Tensor>(buf); |
348 | } |
349 | |
350 | inline bool VerifyTensorBuffer( |
351 | flatbuffers::Verifier &verifier) { |
352 | return verifier.VerifyBuffer<org::apache::arrow::flatbuf::Tensor>(nullptr); |
353 | } |
354 | |
355 | inline bool VerifySizePrefixedTensorBuffer( |
356 | flatbuffers::Verifier &verifier) { |
357 | return verifier.VerifySizePrefixedBuffer<org::apache::arrow::flatbuf::Tensor>(nullptr); |
358 | } |
359 | |
360 | inline void FinishTensorBuffer( |
361 | flatbuffers::FlatBufferBuilder &fbb, |
362 | flatbuffers::Offset<org::apache::arrow::flatbuf::Tensor> root) { |
363 | fbb.Finish(root); |
364 | } |
365 | |
366 | inline void FinishSizePrefixedTensorBuffer( |
367 | flatbuffers::FlatBufferBuilder &fbb, |
368 | flatbuffers::Offset<org::apache::arrow::flatbuf::Tensor> root) { |
369 | fbb.FinishSizePrefixed(root); |
370 | } |
371 | |
372 | } // namespace flatbuf |
373 | } // namespace arrow |
374 | } // namespace apache |
375 | } // namespace org |
376 | |
377 | #endif // FLATBUFFERS_GENERATED_TENSOR_ORG_APACHE_ARROW_FLATBUF_H_ |
378 | |