1 | // Copyright (c) 2013-2014 Sandstorm Development Group, Inc. and contributors |
2 | // Licensed under the MIT License: |
3 | // |
4 | // Permission is hereby granted, free of charge, to any person obtaining a copy |
5 | // of this software and associated documentation files (the "Software"), to deal |
6 | // in the Software without restriction, including without limitation the rights |
7 | // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell |
8 | // copies of the Software, and to permit persons to whom the Software is |
9 | // furnished to do so, subject to the following conditions: |
10 | // |
11 | // The above copyright notice and this permission notice shall be included in |
12 | // all copies or substantial portions of the Software. |
13 | // |
14 | // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
15 | // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
16 | // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE |
17 | // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
18 | // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, |
19 | // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN |
20 | // THE SOFTWARE. |
21 | |
22 | #define CAPNP_PRIVATE |
23 | #include "schema-loader.h" |
24 | #include "message.h" |
25 | #include "arena.h" |
26 | #include <kj/debug.h> |
27 | #include <kj/exception.h> |
28 | #include <kj/arena.h> |
29 | #include <kj/vector.h> |
30 | #include <algorithm> |
31 | #include <kj/map.h> |
32 | |
33 | #if _MSC_VER |
34 | #include <atomic> |
35 | #endif |
36 | |
37 | namespace capnp { |
38 | |
39 | namespace { |
40 | |
41 | struct SchemaBindingsPair { |
42 | const _::RawSchema* schema; |
43 | const _::RawBrandedSchema::Scope* scopeBindings; |
44 | |
45 | inline bool operator==(const SchemaBindingsPair& other) const { |
46 | return schema == other.schema && scopeBindings == other.scopeBindings; |
47 | } |
48 | inline uint hashCode() const { |
49 | return kj::hashCode(schema, scopeBindings); |
50 | } |
51 | }; |
52 | |
53 | } // namespace |
54 | |
55 | bool hasDiscriminantValue(const schema::Field::Reader& reader) { |
56 | return reader.getDiscriminantValue() != schema::Field::NO_DISCRIMINANT; |
57 | } |
58 | |
59 | class SchemaLoader::InitializerImpl: public _::RawSchema::Initializer { |
60 | public: |
61 | inline explicit InitializerImpl(const SchemaLoader& loader): loader(loader), callback(nullptr) {} |
62 | inline InitializerImpl(const SchemaLoader& loader, const LazyLoadCallback& callback) |
63 | : loader(loader), callback(callback) {} |
64 | |
65 | inline kj::Maybe<const LazyLoadCallback&> getCallback() const { return callback; } |
66 | |
67 | void init(const _::RawSchema* schema) const override; |
68 | |
69 | inline bool operator==(decltype(nullptr)) const { return callback == nullptr; } |
70 | |
71 | private: |
72 | const SchemaLoader& loader; |
73 | kj::Maybe<const LazyLoadCallback&> callback; |
74 | }; |
75 | |
76 | class SchemaLoader::BrandedInitializerImpl: public _::RawBrandedSchema::Initializer { |
77 | public: |
78 | inline explicit BrandedInitializerImpl(const SchemaLoader& loader): loader(loader) {} |
79 | |
80 | void init(const _::RawBrandedSchema* schema) const override; |
81 | |
82 | private: |
83 | const SchemaLoader& loader; |
84 | }; |
85 | |
86 | class SchemaLoader::Impl { |
87 | public: |
88 | inline explicit Impl(const SchemaLoader& loader) |
89 | : initializer(loader), brandedInitializer(loader) {} |
90 | inline Impl(const SchemaLoader& loader, const LazyLoadCallback& callback) |
91 | : initializer(loader, callback), brandedInitializer(loader) {} |
92 | |
93 | _::RawSchema* load(const schema::Node::Reader& reader, bool isPlaceholder); |
94 | |
95 | _::RawSchema* loadNative(const _::RawSchema* nativeSchema); |
96 | |
97 | _::RawSchema* loadEmpty(uint64_t id, kj::StringPtr name, schema::Node::Which kind, |
98 | bool isPlaceholder); |
99 | // Create a dummy empty schema of the given kind for the given id and load it. |
100 | |
101 | const _::RawBrandedSchema* makeBranded( |
102 | const _::RawSchema* schema, schema::Brand::Reader proto, |
103 | kj::Maybe<kj::ArrayPtr<const _::RawBrandedSchema::Scope>> clientBrand); |
104 | |
105 | struct TryGetResult { |
106 | _::RawSchema* schema; |
107 | kj::Maybe<const LazyLoadCallback&> callback; |
108 | }; |
109 | |
110 | TryGetResult tryGet(uint64_t typeId) const; |
111 | |
112 | const _::RawBrandedSchema* getUnbound(const _::RawSchema* schema); |
113 | |
114 | kj::Array<Schema> getAllLoaded() const; |
115 | |
116 | void requireStructSize(uint64_t id, uint dataWordCount, uint pointerCount); |
117 | // Require any struct nodes loaded with this ID -- in the past and in the future -- to have at |
118 | // least the given sizes. Struct nodes that don't comply will simply be rewritten to comply. |
119 | // This is used to ensure that parents of group nodes have at least the size of the group node, |
120 | // so that allocating a struct that contains a group then getting the group node and setting |
121 | // its fields can't possibly write outside of the allocated space. |
122 | |
123 | kj::Arena arena; |
124 | |
125 | private: |
126 | kj::HashSet<kj::ArrayPtr<const byte>> dedupTable; |
127 | // Records raw segments of memory in the arena against which we my want to de-dupe later |
128 | // additions. Specifically, RawBrandedSchema binding tables are de-duped. |
129 | |
130 | kj::HashMap<uint64_t, _::RawSchema*> schemas; |
131 | kj::HashMap<SchemaBindingsPair, _::RawBrandedSchema*> brands; |
132 | kj::HashMap<const _::RawSchema*, _::RawBrandedSchema*> unboundBrands; |
133 | |
134 | struct RequiredSize { |
135 | uint16_t dataWordCount; |
136 | uint16_t pointerCount; |
137 | }; |
138 | kj::HashMap<uint64_t, RequiredSize> structSizeRequirements; |
139 | |
140 | InitializerImpl initializer; |
141 | BrandedInitializerImpl brandedInitializer; |
142 | |
143 | kj::ArrayPtr<word> makeUncheckedNode(schema::Node::Reader node); |
144 | // Construct a copy of the given schema node, allocated as a single-segment ("unchecked") node |
145 | // within the loader's arena. |
146 | |
147 | kj::ArrayPtr<word> makeUncheckedNodeEnforcingSizeRequirements(schema::Node::Reader node); |
148 | // Like makeUncheckedNode() but if structSizeRequirements has a requirement for this node which |
149 | // is larger than the node claims to be, the size will be edited to comply. This should be rare. |
150 | // If the incoming node is not a struct, any struct size requirements will be ignored, but if |
151 | // such requirements exist, this indicates an inconsistency that could cause exceptions later on |
152 | // (but at least can't cause memory corruption). |
153 | |
154 | kj::ArrayPtr<word> rewriteStructNodeWithSizes( |
155 | schema::Node::Reader node, uint dataWordCount, uint pointerCount); |
156 | // Make a copy of the given node (which must be a struct node) and set its sizes to be the max |
157 | // of what it said already and the given sizes. |
158 | |
159 | // If the encoded node does not meet the given struct size requirements, make a new copy that |
160 | // does. |
161 | void applyStructSizeRequirement(_::RawSchema* raw, uint dataWordCount, uint pointerCount); |
162 | |
163 | const _::RawBrandedSchema* makeBranded(const _::RawSchema* schema, |
164 | kj::ArrayPtr<const _::RawBrandedSchema::Scope> scopes); |
165 | |
166 | kj::ArrayPtr<const _::RawBrandedSchema::Dependency> makeBrandedDependencies( |
167 | const _::RawSchema* schema, |
168 | kj::Maybe<kj::ArrayPtr<const _::RawBrandedSchema::Scope>> bindings); |
169 | |
170 | void makeDep(_::RawBrandedSchema::Binding& result, |
171 | schema::Type::Reader type, kj::StringPtr scopeName, |
172 | kj::Maybe<kj::ArrayPtr<const _::RawBrandedSchema::Scope>> brandBindings); |
173 | void makeDep(_::RawBrandedSchema::Binding& result, |
174 | uint64_t typeId, schema::Type::Which whichType, schema::Node::Which expectedKind, |
175 | schema::Brand::Reader brand, kj::StringPtr scopeName, |
176 | kj::Maybe<kj::ArrayPtr<const _::RawBrandedSchema::Scope>> brandBindings); |
177 | // Looks up the schema and brand for a dependency, or creates lazily-evaluated placeholders if |
178 | // they don't already exist, and fills in `result`. `scopeName` is a human-readable name of the |
179 | // place where the type appeared. |
180 | // |
181 | // Note that we don't simply return a Binding because we need to be careful about initialization |
182 | // to ensure that our byte-based de-duplification works. If we constructed a Binding on the stack |
183 | // and returned it, padding bytes in that Binding could go uninitialized, causing it to appear |
184 | // unique when it's not. It is expected that `result` has been zero'd via memset() before these |
185 | // methods are called. |
186 | |
187 | const _::RawBrandedSchema* makeDepSchema( |
188 | schema::Type::Reader type, kj::StringPtr scopeName, |
189 | kj::Maybe<kj::ArrayPtr<const _::RawBrandedSchema::Scope>> brandBindings); |
190 | const _::RawBrandedSchema* makeDepSchema( |
191 | uint64_t typeId, schema::Type::Which whichType, schema::Node::Which expectedKind, |
192 | schema::Brand::Reader brand, kj::StringPtr scopeName, |
193 | kj::Maybe<kj::ArrayPtr<const _::RawBrandedSchema::Scope>> brandBindings); |
194 | // Invoke makeDep() then return the result's schema, or nullptr if it's a primitive type. |
195 | |
196 | template <typename T> |
197 | kj::ArrayPtr<const T> copyDeduped(kj::ArrayPtr<const T> values); |
198 | template <typename T> |
199 | kj::ArrayPtr<const T> copyDeduped(kj::ArrayPtr<T> values); |
200 | // Copy the given array into the arena and return the copy -- unless an identical array |
201 | // was copied previously, in which case the existing copy is returned. |
202 | |
203 | friend class SchemaLoader::BrandedInitializerImpl; |
204 | }; |
205 | |
206 | // ======================================================================================= |
207 | |
208 | inline static void verifyVoid(Void value) {} |
209 | // Calls to this will break if the parameter type changes to non-void. We use this to detect |
210 | // when the code needs updating. |
211 | |
212 | class SchemaLoader::Validator { |
213 | public: |
214 | Validator(SchemaLoader::Impl& loader): loader(loader) {} |
215 | |
216 | bool validate(const schema::Node::Reader& node) { |
217 | isValid = true; |
218 | nodeName = node.getDisplayName(); |
219 | dependencies.clear(); |
220 | |
221 | KJ_CONTEXT("validating schema node" , nodeName, (uint)node.which()); |
222 | |
223 | if (node.getParameters().size() > 0) { |
224 | KJ_REQUIRE(node.getIsGeneric(), "if parameter list is non-empty, isGeneric must be true" ) { |
225 | isValid = false; |
226 | return false; |
227 | } |
228 | } |
229 | |
230 | switch (node.which()) { |
231 | case schema::Node::FILE: |
232 | verifyVoid(node.getFile()); |
233 | break; |
234 | case schema::Node::STRUCT: |
235 | validate(node.getStruct(), node.getScopeId()); |
236 | break; |
237 | case schema::Node::ENUM: |
238 | validate(node.getEnum()); |
239 | break; |
240 | case schema::Node::INTERFACE: |
241 | validate(node.getInterface()); |
242 | break; |
243 | case schema::Node::CONST: |
244 | validate(node.getConst()); |
245 | break; |
246 | case schema::Node::ANNOTATION: |
247 | validate(node.getAnnotation()); |
248 | break; |
249 | } |
250 | |
251 | // We accept and pass through node types we don't recognize. |
252 | return isValid; |
253 | } |
254 | |
255 | const _::RawSchema** makeDependencyArray(uint32_t* count) { |
256 | *count = dependencies.size(); |
257 | kj::ArrayPtr<const _::RawSchema*> result = |
258 | loader.arena.allocateArray<const _::RawSchema*>(*count); |
259 | uint pos = 0; |
260 | for (auto& dep: dependencies) { |
261 | result[pos++] = dep.value; |
262 | } |
263 | KJ_DASSERT(pos == *count); |
264 | return result.begin(); |
265 | } |
266 | |
267 | const uint16_t* makeMemberInfoArray(uint32_t* count) { |
268 | *count = members.size(); |
269 | kj::ArrayPtr<uint16_t> result = loader.arena.allocateArray<uint16_t>(*count); |
270 | uint pos = 0; |
271 | for (auto& member: members) { |
272 | result[pos++] = member.value; |
273 | } |
274 | KJ_DASSERT(pos == *count); |
275 | return result.begin(); |
276 | } |
277 | |
278 | const uint16_t* makeMembersByDiscriminantArray() { |
279 | return membersByDiscriminant.begin(); |
280 | } |
281 | |
282 | private: |
283 | SchemaLoader::Impl& loader; |
284 | Text::Reader nodeName; |
285 | bool isValid; |
286 | |
287 | // Maps type IDs -> compiled schemas for each dependency. |
288 | // Order is important because makeDependencyArray() compiles a sorted array. |
289 | kj::TreeMap<uint64_t, _::RawSchema*> dependencies; |
290 | |
291 | // Maps name -> index for each member. |
292 | // Order is important because makeMemberInfoArray() compiles a sorted array. |
293 | kj::TreeMap<Text::Reader, uint> members; |
294 | |
295 | kj::ArrayPtr<uint16_t> membersByDiscriminant; |
296 | |
297 | #define VALIDATE_SCHEMA(condition, ...) \ |
298 | KJ_REQUIRE(condition, ##__VA_ARGS__) { isValid = false; return; } |
299 | #define FAIL_VALIDATE_SCHEMA(...) \ |
300 | KJ_FAIL_REQUIRE(__VA_ARGS__) { isValid = false; return; } |
301 | |
302 | void validateMemberName(kj::StringPtr name, uint index) { |
303 | members.upsert(name, index, [&](auto&, auto&&) { |
304 | FAIL_VALIDATE_SCHEMA("duplicate name" , name); |
305 | }); |
306 | } |
307 | |
308 | void validate(const schema::Node::Struct::Reader& structNode, uint64_t scopeId) { |
309 | uint dataSizeInBits = structNode.getDataWordCount() * 64; |
310 | uint pointerCount = structNode.getPointerCount(); |
311 | |
312 | auto fields = structNode.getFields(); |
313 | |
314 | KJ_STACK_ARRAY(bool, sawCodeOrder, fields.size(), 32, 256); |
315 | memset(sawCodeOrder.begin(), 0, sawCodeOrder.size() * sizeof(sawCodeOrder[0])); |
316 | |
317 | KJ_STACK_ARRAY(bool, sawDiscriminantValue, structNode.getDiscriminantCount(), 32, 256); |
318 | memset(sawDiscriminantValue.begin(), 0, |
319 | sawDiscriminantValue.size() * sizeof(sawDiscriminantValue[0])); |
320 | |
321 | if (structNode.getDiscriminantCount() > 0) { |
322 | VALIDATE_SCHEMA(structNode.getDiscriminantCount() != 1, |
323 | "union must have at least two members" ); |
324 | VALIDATE_SCHEMA(structNode.getDiscriminantCount() <= fields.size(), |
325 | "struct can't have more union fields than total fields" ); |
326 | |
327 | VALIDATE_SCHEMA((structNode.getDiscriminantOffset() + 1) * 16 <= dataSizeInBits, |
328 | "union discriminant is out-of-bounds" ); |
329 | } |
330 | |
331 | membersByDiscriminant = loader.arena.allocateArray<uint16_t>(fields.size()); |
332 | uint discriminantPos = 0; |
333 | uint nonDiscriminantPos = structNode.getDiscriminantCount(); |
334 | |
335 | uint index = 0; |
336 | uint nextOrdinal = 0; |
337 | for (auto field: fields) { |
338 | KJ_CONTEXT("validating struct field" , field.getName()); |
339 | |
340 | validateMemberName(field.getName(), index); |
341 | VALIDATE_SCHEMA(field.getCodeOrder() < sawCodeOrder.size() && |
342 | !sawCodeOrder[field.getCodeOrder()], |
343 | "invalid codeOrder" ); |
344 | sawCodeOrder[field.getCodeOrder()] = true; |
345 | |
346 | auto ordinal = field.getOrdinal(); |
347 | if (ordinal.isExplicit()) { |
348 | VALIDATE_SCHEMA(ordinal.getExplicit() >= nextOrdinal, |
349 | "fields were not ordered by ordinal" ); |
350 | nextOrdinal = ordinal.getExplicit() + 1; |
351 | } |
352 | |
353 | if (hasDiscriminantValue(field)) { |
354 | VALIDATE_SCHEMA(field.getDiscriminantValue() < sawDiscriminantValue.size() && |
355 | !sawDiscriminantValue[field.getDiscriminantValue()], |
356 | "invalid discriminantValue" ); |
357 | sawDiscriminantValue[field.getDiscriminantValue()] = true; |
358 | |
359 | membersByDiscriminant[discriminantPos++] = index; |
360 | } else { |
361 | VALIDATE_SCHEMA(nonDiscriminantPos <= fields.size(), |
362 | "discriminantCount did not match fields" ); |
363 | membersByDiscriminant[nonDiscriminantPos++] = index; |
364 | } |
365 | |
366 | switch (field.which()) { |
367 | case schema::Field::SLOT: { |
368 | auto slot = field.getSlot(); |
369 | |
370 | uint fieldBits = 0; |
371 | bool fieldIsPointer = false; |
372 | validate(slot.getType(), slot.getDefaultValue(), &fieldBits, &fieldIsPointer); |
373 | VALIDATE_SCHEMA(fieldBits * (slot.getOffset() + 1) <= dataSizeInBits && |
374 | fieldIsPointer * (slot.getOffset() + 1) <= pointerCount, |
375 | "field offset out-of-bounds" , |
376 | slot.getOffset(), dataSizeInBits, pointerCount); |
377 | |
378 | break; |
379 | } |
380 | |
381 | case schema::Field::GROUP: |
382 | // Require that the group is a struct node. |
383 | validateTypeId(field.getGroup().getTypeId(), schema::Node::STRUCT); |
384 | break; |
385 | } |
386 | |
387 | ++index; |
388 | } |
389 | |
390 | // If the above code is correct, these should pass. |
391 | KJ_ASSERT(discriminantPos == structNode.getDiscriminantCount()); |
392 | KJ_ASSERT(nonDiscriminantPos == fields.size()); |
393 | |
394 | if (structNode.getIsGroup()) { |
395 | VALIDATE_SCHEMA(scopeId != 0, "group node missing scopeId" ); |
396 | |
397 | // Require that the group's scope has at least the same size as the group, so that anyone |
398 | // constructing an instance of the outer scope can safely read/write the group. |
399 | loader.requireStructSize(scopeId, structNode.getDataWordCount(), |
400 | structNode.getPointerCount()); |
401 | |
402 | // Require that the parent type is a struct. |
403 | validateTypeId(scopeId, schema::Node::STRUCT); |
404 | } |
405 | } |
406 | |
407 | void validate(const schema::Node::Enum::Reader& enumNode) { |
408 | auto enumerants = enumNode.getEnumerants(); |
409 | KJ_STACK_ARRAY(bool, sawCodeOrder, enumerants.size(), 32, 256); |
410 | memset(sawCodeOrder.begin(), 0, sawCodeOrder.size() * sizeof(sawCodeOrder[0])); |
411 | |
412 | uint index = 0; |
413 | for (auto enumerant: enumerants) { |
414 | validateMemberName(enumerant.getName(), index++); |
415 | |
416 | VALIDATE_SCHEMA(enumerant.getCodeOrder() < enumerants.size() && |
417 | !sawCodeOrder[enumerant.getCodeOrder()], |
418 | "invalid codeOrder" , enumerant.getName()); |
419 | sawCodeOrder[enumerant.getCodeOrder()] = true; |
420 | } |
421 | } |
422 | |
423 | void validate(const schema::Node::Interface::Reader& interfaceNode) { |
424 | for (auto extend: interfaceNode.getSuperclasses()) { |
425 | validateTypeId(extend.getId(), schema::Node::INTERFACE); |
426 | validate(extend.getBrand()); |
427 | } |
428 | |
429 | auto methods = interfaceNode.getMethods(); |
430 | KJ_STACK_ARRAY(bool, sawCodeOrder, methods.size(), 32, 256); |
431 | memset(sawCodeOrder.begin(), 0, sawCodeOrder.size() * sizeof(sawCodeOrder[0])); |
432 | |
433 | uint index = 0; |
434 | for (auto method: methods) { |
435 | KJ_CONTEXT("validating method" , method.getName()); |
436 | validateMemberName(method.getName(), index++); |
437 | |
438 | VALIDATE_SCHEMA(method.getCodeOrder() < methods.size() && |
439 | !sawCodeOrder[method.getCodeOrder()], |
440 | "invalid codeOrder" ); |
441 | sawCodeOrder[method.getCodeOrder()] = true; |
442 | |
443 | validateTypeId(method.getParamStructType(), schema::Node::STRUCT); |
444 | validate(method.getParamBrand()); |
445 | validateTypeId(method.getResultStructType(), schema::Node::STRUCT); |
446 | validate(method.getResultBrand()); |
447 | } |
448 | } |
449 | |
450 | void validate(const schema::Node::Const::Reader& constNode) { |
451 | uint dummy1; |
452 | bool dummy2; |
453 | validate(constNode.getType(), constNode.getValue(), &dummy1, &dummy2); |
454 | } |
455 | |
456 | void validate(const schema::Node::Annotation::Reader& annotationNode) { |
457 | validate(annotationNode.getType()); |
458 | } |
459 | |
460 | void validate(const schema::Type::Reader& type, const schema::Value::Reader& value, |
461 | uint* dataSizeInBits, bool* isPointer) { |
462 | validate(type); |
463 | |
464 | schema::Value::Which expectedValueType = schema::Value::VOID; |
465 | bool hadCase = false; |
466 | switch (type.which()) { |
467 | #define HANDLE_TYPE(name, bits, ptr) \ |
468 | case schema::Type::name: \ |
469 | expectedValueType = schema::Value::name; \ |
470 | *dataSizeInBits = bits; *isPointer = ptr; \ |
471 | hadCase = true; \ |
472 | break; |
473 | HANDLE_TYPE(VOID, 0, false) |
474 | HANDLE_TYPE(BOOL, 1, false) |
475 | HANDLE_TYPE(INT8, 8, false) |
476 | HANDLE_TYPE(INT16, 16, false) |
477 | HANDLE_TYPE(INT32, 32, false) |
478 | HANDLE_TYPE(INT64, 64, false) |
479 | HANDLE_TYPE(UINT8, 8, false) |
480 | HANDLE_TYPE(UINT16, 16, false) |
481 | HANDLE_TYPE(UINT32, 32, false) |
482 | HANDLE_TYPE(UINT64, 64, false) |
483 | HANDLE_TYPE(FLOAT32, 32, false) |
484 | HANDLE_TYPE(FLOAT64, 64, false) |
485 | HANDLE_TYPE(TEXT, 0, true) |
486 | HANDLE_TYPE(DATA, 0, true) |
487 | HANDLE_TYPE(LIST, 0, true) |
488 | HANDLE_TYPE(ENUM, 16, false) |
489 | HANDLE_TYPE(STRUCT, 0, true) |
490 | HANDLE_TYPE(INTERFACE, 0, true) |
491 | HANDLE_TYPE(ANY_POINTER, 0, true) |
492 | #undef HANDLE_TYPE |
493 | } |
494 | |
495 | if (hadCase) { |
496 | VALIDATE_SCHEMA(value.which() == expectedValueType, "Value did not match type." , |
497 | (uint)value.which(), (uint)expectedValueType); |
498 | } |
499 | } |
500 | |
501 | void validate(const schema::Type::Reader& type) { |
502 | switch (type.which()) { |
503 | case schema::Type::VOID: |
504 | case schema::Type::BOOL: |
505 | case schema::Type::INT8: |
506 | case schema::Type::INT16: |
507 | case schema::Type::INT32: |
508 | case schema::Type::INT64: |
509 | case schema::Type::UINT8: |
510 | case schema::Type::UINT16: |
511 | case schema::Type::UINT32: |
512 | case schema::Type::UINT64: |
513 | case schema::Type::FLOAT32: |
514 | case schema::Type::FLOAT64: |
515 | case schema::Type::TEXT: |
516 | case schema::Type::DATA: |
517 | case schema::Type::ANY_POINTER: |
518 | break; |
519 | |
520 | case schema::Type::STRUCT: { |
521 | auto structType = type.getStruct(); |
522 | validateTypeId(structType.getTypeId(), schema::Node::STRUCT); |
523 | validate(structType.getBrand()); |
524 | break; |
525 | } |
526 | case schema::Type::ENUM: { |
527 | auto enumType = type.getEnum(); |
528 | validateTypeId(enumType.getTypeId(), schema::Node::ENUM); |
529 | validate(enumType.getBrand()); |
530 | break; |
531 | } |
532 | case schema::Type::INTERFACE: { |
533 | auto interfaceType = type.getInterface(); |
534 | validateTypeId(interfaceType.getTypeId(), schema::Node::INTERFACE); |
535 | validate(interfaceType.getBrand()); |
536 | break; |
537 | } |
538 | |
539 | case schema::Type::LIST: |
540 | validate(type.getList().getElementType()); |
541 | break; |
542 | } |
543 | |
544 | // We intentionally allow unknown types. |
545 | } |
546 | |
547 | void validate(const schema::Brand::Reader& brand) { |
548 | for (auto scope: brand.getScopes()) { |
549 | switch (scope.which()) { |
550 | case schema::Brand::Scope::BIND: |
551 | for (auto binding: scope.getBind()) { |
552 | switch (binding.which()) { |
553 | case schema::Brand::Binding::UNBOUND: |
554 | break; |
555 | case schema::Brand::Binding::TYPE: { |
556 | auto type = binding.getType(); |
557 | validate(type); |
558 | bool isPointer = true; |
559 | switch (type.which()) { |
560 | case schema::Type::VOID: |
561 | case schema::Type::BOOL: |
562 | case schema::Type::INT8: |
563 | case schema::Type::INT16: |
564 | case schema::Type::INT32: |
565 | case schema::Type::INT64: |
566 | case schema::Type::UINT8: |
567 | case schema::Type::UINT16: |
568 | case schema::Type::UINT32: |
569 | case schema::Type::UINT64: |
570 | case schema::Type::FLOAT32: |
571 | case schema::Type::FLOAT64: |
572 | case schema::Type::ENUM: |
573 | isPointer = false; |
574 | break; |
575 | |
576 | case schema::Type::TEXT: |
577 | case schema::Type::DATA: |
578 | case schema::Type::ANY_POINTER: |
579 | case schema::Type::STRUCT: |
580 | case schema::Type::INTERFACE: |
581 | case schema::Type::LIST: |
582 | isPointer = true; |
583 | break; |
584 | } |
585 | VALIDATE_SCHEMA(isPointer, |
586 | "generic type parameter must be a pointer type" , type); |
587 | |
588 | break; |
589 | } |
590 | } |
591 | } |
592 | break; |
593 | case schema::Brand::Scope::INHERIT: |
594 | break; |
595 | } |
596 | } |
597 | } |
598 | |
599 | void validateTypeId(uint64_t id, schema::Node::Which expectedKind) { |
600 | _::RawSchema* existing = loader.tryGet(id).schema; |
601 | if (existing != nullptr) { |
602 | auto node = readMessageUnchecked<schema::Node>(existing->encodedNode); |
603 | VALIDATE_SCHEMA(node.which() == expectedKind, |
604 | "expected a different kind of node for this ID" , |
605 | id, (uint)expectedKind, (uint)node.which(), node.getDisplayName()); |
606 | dependencies.upsert(id, existing, [](auto&,auto&&) { /* ignore dupe */ }); |
607 | return; |
608 | } |
609 | |
610 | dependencies.upsert(id, loader.loadEmpty( |
611 | id, kj::str("(unknown type used by " , nodeName , ")" ), expectedKind, true), |
612 | [](auto&,auto&&) { /* ignore dupe */ }); |
613 | } |
614 | |
615 | #undef VALIDATE_SCHEMA |
616 | #undef FAIL_VALIDATE_SCHEMA |
617 | }; |
618 | |
619 | // ======================================================================================= |
620 | |
621 | class SchemaLoader::CompatibilityChecker { |
622 | public: |
623 | CompatibilityChecker(SchemaLoader::Impl& loader): loader(loader) {} |
624 | |
625 | bool shouldReplace(const schema::Node::Reader& existingNode, |
626 | const schema::Node::Reader& replacement, |
627 | bool preferReplacementIfEquivalent) { |
628 | this->existingNode = existingNode; |
629 | this->replacementNode = replacement; |
630 | |
631 | KJ_CONTEXT("checking compatibility with previously-loaded node of the same id" , |
632 | existingNode.getDisplayName()); |
633 | |
634 | KJ_DREQUIRE(existingNode.getId() == replacement.getId()); |
635 | |
636 | nodeName = existingNode.getDisplayName(); |
637 | compatibility = EQUIVALENT; |
638 | |
639 | checkCompatibility(existingNode, replacement); |
640 | |
641 | // Prefer the newer schema. |
642 | return preferReplacementIfEquivalent ? compatibility != OLDER : compatibility == NEWER; |
643 | } |
644 | |
645 | private: |
646 | SchemaLoader::Impl& loader; |
647 | Text::Reader nodeName; |
648 | schema::Node::Reader existingNode; |
649 | schema::Node::Reader replacementNode; |
650 | |
651 | enum Compatibility { |
652 | EQUIVALENT, |
653 | OLDER, |
654 | NEWER, |
655 | INCOMPATIBLE |
656 | }; |
657 | Compatibility compatibility; |
658 | |
659 | #define VALIDATE_SCHEMA(condition, ...) \ |
660 | KJ_REQUIRE(condition, ##__VA_ARGS__) { compatibility = INCOMPATIBLE; return; } |
661 | #define FAIL_VALIDATE_SCHEMA(...) \ |
662 | KJ_FAIL_REQUIRE(__VA_ARGS__) { compatibility = INCOMPATIBLE; return; } |
663 | |
664 | void replacementIsNewer() { |
665 | switch (compatibility) { |
666 | case EQUIVALENT: |
667 | compatibility = NEWER; |
668 | break; |
669 | case OLDER: |
670 | FAIL_VALIDATE_SCHEMA("Schema node contains some changes that are upgrades and some " |
671 | "that are downgrades. All changes must be in the same direction for compatibility." ); |
672 | break; |
673 | case NEWER: |
674 | break; |
675 | case INCOMPATIBLE: |
676 | break; |
677 | } |
678 | } |
679 | |
680 | void replacementIsOlder() { |
681 | switch (compatibility) { |
682 | case EQUIVALENT: |
683 | compatibility = OLDER; |
684 | break; |
685 | case OLDER: |
686 | break; |
687 | case NEWER: |
688 | FAIL_VALIDATE_SCHEMA("Schema node contains some changes that are upgrades and some " |
689 | "that are downgrades. All changes must be in the same direction for compatibility." ); |
690 | break; |
691 | case INCOMPATIBLE: |
692 | break; |
693 | } |
694 | } |
695 | |
696 | void checkCompatibility(const schema::Node::Reader& node, |
697 | const schema::Node::Reader& replacement) { |
698 | // Returns whether `replacement` is equivalent, older than, newer than, or incompatible with |
699 | // `node`. If exceptions are enabled, this will throw an exception on INCOMPATIBLE. |
700 | |
701 | VALIDATE_SCHEMA(node.which() == replacement.which(), |
702 | "kind of declaration changed" ); |
703 | |
704 | // No need to check compatibility of most of the non-body parts of the node: |
705 | // - Arbitrary renaming and moving between scopes is allowed. |
706 | // - Annotations are ignored for compatibility purposes. |
707 | |
708 | if (replacement.getParameters().size() > node.getParameters().size()) { |
709 | replacementIsNewer(); |
710 | } else if (replacement.getParameters().size() < node.getParameters().size()) { |
711 | replacementIsOlder(); |
712 | } |
713 | |
714 | switch (node.which()) { |
715 | case schema::Node::FILE: |
716 | verifyVoid(node.getFile()); |
717 | break; |
718 | case schema::Node::STRUCT: |
719 | checkCompatibility(node.getStruct(), replacement.getStruct(), |
720 | node.getScopeId(), replacement.getScopeId()); |
721 | break; |
722 | case schema::Node::ENUM: |
723 | checkCompatibility(node.getEnum(), replacement.getEnum()); |
724 | break; |
725 | case schema::Node::INTERFACE: |
726 | checkCompatibility(node.getInterface(), replacement.getInterface()); |
727 | break; |
728 | case schema::Node::CONST: |
729 | checkCompatibility(node.getConst(), replacement.getConst()); |
730 | break; |
731 | case schema::Node::ANNOTATION: |
732 | checkCompatibility(node.getAnnotation(), replacement.getAnnotation()); |
733 | break; |
734 | } |
735 | } |
736 | |
737 | void checkCompatibility(const schema::Node::Struct::Reader& structNode, |
738 | const schema::Node::Struct::Reader& replacement, |
739 | uint64_t scopeId, uint64_t replacementScopeId) { |
740 | if (replacement.getDataWordCount() > structNode.getDataWordCount()) { |
741 | replacementIsNewer(); |
742 | } else if (replacement.getDataWordCount() < structNode.getDataWordCount()) { |
743 | replacementIsOlder(); |
744 | } |
745 | if (replacement.getPointerCount() > structNode.getPointerCount()) { |
746 | replacementIsNewer(); |
747 | } else if (replacement.getPointerCount() < structNode.getPointerCount()) { |
748 | replacementIsOlder(); |
749 | } |
750 | if (replacement.getDiscriminantCount() > structNode.getDiscriminantCount()) { |
751 | replacementIsNewer(); |
752 | } else if (replacement.getDiscriminantCount() < structNode.getDiscriminantCount()) { |
753 | replacementIsOlder(); |
754 | } |
755 | |
756 | if (replacement.getDiscriminantCount() > 0 && structNode.getDiscriminantCount() > 0) { |
757 | VALIDATE_SCHEMA(replacement.getDiscriminantOffset() == structNode.getDiscriminantOffset(), |
758 | "union discriminant position changed" ); |
759 | } |
760 | |
761 | // The shared members should occupy corresponding positions in the member lists, since the |
762 | // lists are sorted by ordinal. |
763 | auto fields = structNode.getFields(); |
764 | auto replacementFields = replacement.getFields(); |
765 | uint count = std::min(fields.size(), replacementFields.size()); |
766 | |
767 | if (replacementFields.size() > fields.size()) { |
768 | replacementIsNewer(); |
769 | } else if (replacementFields.size() < fields.size()) { |
770 | replacementIsOlder(); |
771 | } |
772 | |
773 | for (uint i = 0; i < count; i++) { |
774 | checkCompatibility(fields[i], replacementFields[i]); |
775 | } |
776 | |
777 | // For the moment, we allow "upgrading" from non-group to group, mainly so that the |
778 | // placeholders we generate for group parents (which in the absence of more info, we assume to |
779 | // be non-groups) can be replaced with groups. |
780 | // |
781 | // TODO(cleanup): The placeholder approach is really breaking down. Maybe we need to maintain |
782 | // a list of expectations for nodes we haven't loaded yet. |
783 | if (structNode.getIsGroup()) { |
784 | if (replacement.getIsGroup()) { |
785 | VALIDATE_SCHEMA(replacementScopeId == scopeId, "group node's scope changed" ); |
786 | } else { |
787 | replacementIsOlder(); |
788 | } |
789 | } else { |
790 | if (replacement.getIsGroup()) { |
791 | replacementIsNewer(); |
792 | } |
793 | } |
794 | } |
795 | |
796 | void checkCompatibility(const schema::Field::Reader& field, |
797 | const schema::Field::Reader& replacement) { |
798 | KJ_CONTEXT("comparing struct field" , field.getName()); |
799 | |
800 | // A field that is initially not in a union can be upgraded to be in one, as long as it has |
801 | // discriminant 0. |
802 | uint discriminant = hasDiscriminantValue(field) ? field.getDiscriminantValue() : 0; |
803 | uint replacementDiscriminant = |
804 | hasDiscriminantValue(replacement) ? replacement.getDiscriminantValue() : 0; |
805 | VALIDATE_SCHEMA(discriminant == replacementDiscriminant, "Field discriminant changed." ); |
806 | |
807 | switch (field.which()) { |
808 | case schema::Field::SLOT: { |
809 | auto slot = field.getSlot(); |
810 | |
811 | switch (replacement.which()) { |
812 | case schema::Field::SLOT: { |
813 | auto replacementSlot = replacement.getSlot(); |
814 | |
815 | checkCompatibility(slot.getType(), replacementSlot.getType(), |
816 | NO_UPGRADE_TO_STRUCT); |
817 | checkDefaultCompatibility(slot.getDefaultValue(), |
818 | replacementSlot.getDefaultValue()); |
819 | |
820 | VALIDATE_SCHEMA(slot.getOffset() == replacementSlot.getOffset(), |
821 | "field position changed" ); |
822 | break; |
823 | } |
824 | case schema::Field::GROUP: |
825 | checkUpgradeToStruct(slot.getType(), replacement.getGroup().getTypeId(), |
826 | existingNode, field); |
827 | break; |
828 | } |
829 | |
830 | break; |
831 | } |
832 | |
833 | case schema::Field::GROUP: |
834 | switch (replacement.which()) { |
835 | case schema::Field::SLOT: |
836 | checkUpgradeToStruct(replacement.getSlot().getType(), field.getGroup().getTypeId(), |
837 | replacementNode, replacement); |
838 | break; |
839 | case schema::Field::GROUP: |
840 | VALIDATE_SCHEMA(field.getGroup().getTypeId() == replacement.getGroup().getTypeId(), |
841 | "group id changed" ); |
842 | break; |
843 | } |
844 | break; |
845 | } |
846 | } |
847 | |
848 | void checkCompatibility(const schema::Node::Enum::Reader& enumNode, |
849 | const schema::Node::Enum::Reader& replacement) { |
850 | uint size = enumNode.getEnumerants().size(); |
851 | uint replacementSize = replacement.getEnumerants().size(); |
852 | if (replacementSize > size) { |
853 | replacementIsNewer(); |
854 | } else if (replacementSize < size) { |
855 | replacementIsOlder(); |
856 | } |
857 | } |
858 | |
859 | void checkCompatibility(const schema::Node::Interface::Reader& interfaceNode, |
860 | const schema::Node::Interface::Reader& replacement) { |
861 | { |
862 | // Check superclasses. |
863 | |
864 | kj::Vector<uint64_t> superclasses; |
865 | kj::Vector<uint64_t> replacementSuperclasses; |
866 | for (auto superclass: interfaceNode.getSuperclasses()) { |
867 | superclasses.add(superclass.getId()); |
868 | } |
869 | for (auto superclass: replacement.getSuperclasses()) { |
870 | replacementSuperclasses.add(superclass.getId()); |
871 | } |
872 | std::sort(superclasses.begin(), superclasses.end()); |
873 | std::sort(replacementSuperclasses.begin(), replacementSuperclasses.end()); |
874 | |
875 | auto iter = superclasses.begin(); |
876 | auto replacementIter = replacementSuperclasses.begin(); |
877 | |
878 | while (iter != superclasses.end() || replacementIter != replacementSuperclasses.end()) { |
879 | if (iter == superclasses.end()) { |
880 | replacementIsNewer(); |
881 | break; |
882 | } else if (replacementIter == replacementSuperclasses.end()) { |
883 | replacementIsOlder(); |
884 | break; |
885 | } else if (*iter < *replacementIter) { |
886 | replacementIsOlder(); |
887 | ++iter; |
888 | } else if (*iter > *replacementIter) { |
889 | replacementIsNewer(); |
890 | ++replacementIter; |
891 | } else { |
892 | ++iter; |
893 | ++replacementIter; |
894 | } |
895 | } |
896 | } |
897 | |
898 | auto methods = interfaceNode.getMethods(); |
899 | auto replacementMethods = replacement.getMethods(); |
900 | |
901 | if (replacementMethods.size() > methods.size()) { |
902 | replacementIsNewer(); |
903 | } else if (replacementMethods.size() < methods.size()) { |
904 | replacementIsOlder(); |
905 | } |
906 | |
907 | uint count = std::min(methods.size(), replacementMethods.size()); |
908 | |
909 | for (uint i = 0; i < count; i++) { |
910 | checkCompatibility(methods[i], replacementMethods[i]); |
911 | } |
912 | } |
913 | |
914 | void checkCompatibility(const schema::Method::Reader& method, |
915 | const schema::Method::Reader& replacement) { |
916 | KJ_CONTEXT("comparing method" , method.getName()); |
917 | |
918 | // TODO(someday): Allow named parameter list to be replaced by compatible struct type. |
919 | VALIDATE_SCHEMA(method.getParamStructType() == replacement.getParamStructType(), |
920 | "Updated method has different parameters." ); |
921 | VALIDATE_SCHEMA(method.getResultStructType() == replacement.getResultStructType(), |
922 | "Updated method has different results." ); |
923 | } |
924 | |
925 | void checkCompatibility(const schema::Node::Const::Reader& constNode, |
926 | const schema::Node::Const::Reader& replacement) { |
927 | // Who cares? These don't appear on the wire. |
928 | } |
929 | |
930 | void checkCompatibility(const schema::Node::Annotation::Reader& annotationNode, |
931 | const schema::Node::Annotation::Reader& replacement) { |
932 | // Who cares? These don't appear on the wire. |
933 | } |
934 | |
935 | enum UpgradeToStructMode { |
936 | ALLOW_UPGRADE_TO_STRUCT, |
937 | NO_UPGRADE_TO_STRUCT |
938 | }; |
939 | |
940 | void checkCompatibility(const schema::Type::Reader& type, |
941 | const schema::Type::Reader& replacement, |
942 | UpgradeToStructMode upgradeToStructMode) { |
943 | if (replacement.which() != type.which()) { |
944 | // Check for allowed "upgrade" to Data or AnyPointer. |
945 | if (replacement.isData() && canUpgradeToData(type)) { |
946 | replacementIsNewer(); |
947 | return; |
948 | } else if (type.isData() && canUpgradeToData(replacement)) { |
949 | replacementIsOlder(); |
950 | return; |
951 | } else if (replacement.isAnyPointer() && canUpgradeToAnyPointer(type)) { |
952 | replacementIsNewer(); |
953 | return; |
954 | } else if (type.isAnyPointer() && canUpgradeToAnyPointer(replacement)) { |
955 | replacementIsOlder(); |
956 | return; |
957 | } |
958 | |
959 | if (upgradeToStructMode == ALLOW_UPGRADE_TO_STRUCT) { |
960 | if (type.isStruct()) { |
961 | checkUpgradeToStruct(replacement, type.getStruct().getTypeId()); |
962 | return; |
963 | } else if (replacement.isStruct()) { |
964 | checkUpgradeToStruct(type, replacement.getStruct().getTypeId()); |
965 | return; |
966 | } |
967 | } |
968 | |
969 | FAIL_VALIDATE_SCHEMA("a type was changed" ); |
970 | } |
971 | |
972 | switch (type.which()) { |
973 | case schema::Type::VOID: |
974 | case schema::Type::BOOL: |
975 | case schema::Type::INT8: |
976 | case schema::Type::INT16: |
977 | case schema::Type::INT32: |
978 | case schema::Type::INT64: |
979 | case schema::Type::UINT8: |
980 | case schema::Type::UINT16: |
981 | case schema::Type::UINT32: |
982 | case schema::Type::UINT64: |
983 | case schema::Type::FLOAT32: |
984 | case schema::Type::FLOAT64: |
985 | case schema::Type::TEXT: |
986 | case schema::Type::DATA: |
987 | case schema::Type::ANY_POINTER: |
988 | return; |
989 | |
990 | case schema::Type::LIST: |
991 | checkCompatibility(type.getList().getElementType(), replacement.getList().getElementType(), |
992 | ALLOW_UPGRADE_TO_STRUCT); |
993 | return; |
994 | |
995 | case schema::Type::ENUM: |
996 | VALIDATE_SCHEMA(replacement.getEnum().getTypeId() == type.getEnum().getTypeId(), |
997 | "type changed enum type" ); |
998 | return; |
999 | |
1000 | case schema::Type::STRUCT: |
1001 | // TODO(someday): If the IDs don't match, we should compare the two structs for |
1002 | // compatibility. This is tricky, though, because the new type's target may not yet be |
1003 | // loaded. In that case we could take the old type, make a copy of it, assign the new |
1004 | // ID to the copy, and load() that. That forces any struct type loaded for that ID to |
1005 | // be compatible. However, that has another problem, which is that it could be that the |
1006 | // whole reason the type was replaced was to fork that type, and so an incompatibility |
1007 | // could be very much expected. This could be a rat hole... |
1008 | VALIDATE_SCHEMA(replacement.getStruct().getTypeId() == type.getStruct().getTypeId(), |
1009 | "type changed to incompatible struct type" ); |
1010 | return; |
1011 | |
1012 | case schema::Type::INTERFACE: |
1013 | VALIDATE_SCHEMA(replacement.getInterface().getTypeId() == type.getInterface().getTypeId(), |
1014 | "type changed to incompatible interface type" ); |
1015 | return; |
1016 | } |
1017 | |
1018 | // We assume unknown types (from newer versions of Cap'n Proto?) are equivalent. |
1019 | } |
1020 | |
1021 | void checkUpgradeToStruct(const schema::Type::Reader& type, uint64_t structTypeId, |
1022 | kj::Maybe<schema::Node::Reader> matchSize = nullptr, |
1023 | kj::Maybe<schema::Field::Reader> matchPosition = nullptr) { |
1024 | // We can't just look up the target struct and check it because it may not have been loaded |
1025 | // yet. Instead, we contrive a struct that looks like what we want and load() that, which |
1026 | // guarantees that any incompatibility will be caught either now or when the real version of |
1027 | // that struct is loaded. |
1028 | |
1029 | word scratch[32]; |
1030 | memset(scratch, 0, sizeof(scratch)); |
1031 | MallocMessageBuilder builder(scratch); |
1032 | auto node = builder.initRoot<schema::Node>(); |
1033 | node.setId(structTypeId); |
1034 | node.setDisplayName(kj::str("(unknown type used in " , nodeName, ")" )); |
1035 | auto structNode = node.initStruct(); |
1036 | |
1037 | switch (type.which()) { |
1038 | case schema::Type::VOID: |
1039 | structNode.setDataWordCount(0); |
1040 | structNode.setPointerCount(0); |
1041 | break; |
1042 | |
1043 | case schema::Type::BOOL: |
1044 | structNode.setDataWordCount(1); |
1045 | structNode.setPointerCount(0); |
1046 | break; |
1047 | |
1048 | case schema::Type::INT8: |
1049 | case schema::Type::UINT8: |
1050 | structNode.setDataWordCount(1); |
1051 | structNode.setPointerCount(0); |
1052 | break; |
1053 | |
1054 | case schema::Type::INT16: |
1055 | case schema::Type::UINT16: |
1056 | case schema::Type::ENUM: |
1057 | structNode.setDataWordCount(1); |
1058 | structNode.setPointerCount(0); |
1059 | break; |
1060 | |
1061 | case schema::Type::INT32: |
1062 | case schema::Type::UINT32: |
1063 | case schema::Type::FLOAT32: |
1064 | structNode.setDataWordCount(1); |
1065 | structNode.setPointerCount(0); |
1066 | break; |
1067 | |
1068 | case schema::Type::INT64: |
1069 | case schema::Type::UINT64: |
1070 | case schema::Type::FLOAT64: |
1071 | structNode.setDataWordCount(1); |
1072 | structNode.setPointerCount(0); |
1073 | break; |
1074 | |
1075 | case schema::Type::TEXT: |
1076 | case schema::Type::DATA: |
1077 | case schema::Type::LIST: |
1078 | case schema::Type::STRUCT: |
1079 | case schema::Type::INTERFACE: |
1080 | case schema::Type::ANY_POINTER: |
1081 | structNode.setDataWordCount(0); |
1082 | structNode.setPointerCount(1); |
1083 | break; |
1084 | } |
1085 | |
1086 | KJ_IF_MAYBE(s, matchSize) { |
1087 | auto match = s->getStruct(); |
1088 | structNode.setDataWordCount(match.getDataWordCount()); |
1089 | structNode.setPointerCount(match.getPointerCount()); |
1090 | } |
1091 | |
1092 | auto field = structNode.initFields(1)[0]; |
1093 | field.setName("member0" ); |
1094 | field.setCodeOrder(0); |
1095 | auto slot = field.initSlot(); |
1096 | slot.setType(type); |
1097 | |
1098 | KJ_IF_MAYBE(p, matchPosition) { |
1099 | if (p->getOrdinal().isExplicit()) { |
1100 | field.getOrdinal().setExplicit(p->getOrdinal().getExplicit()); |
1101 | } else { |
1102 | field.getOrdinal().setImplicit(); |
1103 | } |
1104 | auto matchSlot = p->getSlot(); |
1105 | slot.setOffset(matchSlot.getOffset()); |
1106 | slot.setDefaultValue(matchSlot.getDefaultValue()); |
1107 | } else { |
1108 | field.getOrdinal().setExplicit(0); |
1109 | slot.setOffset(0); |
1110 | |
1111 | schema::Value::Builder value = slot.initDefaultValue(); |
1112 | switch (type.which()) { |
1113 | case schema::Type::VOID: value.setVoid(); break; |
1114 | case schema::Type::BOOL: value.setBool(false); break; |
1115 | case schema::Type::INT8: value.setInt8(0); break; |
1116 | case schema::Type::INT16: value.setInt16(0); break; |
1117 | case schema::Type::INT32: value.setInt32(0); break; |
1118 | case schema::Type::INT64: value.setInt64(0); break; |
1119 | case schema::Type::UINT8: value.setUint8(0); break; |
1120 | case schema::Type::UINT16: value.setUint16(0); break; |
1121 | case schema::Type::UINT32: value.setUint32(0); break; |
1122 | case schema::Type::UINT64: value.setUint64(0); break; |
1123 | case schema::Type::FLOAT32: value.setFloat32(0); break; |
1124 | case schema::Type::FLOAT64: value.setFloat64(0); break; |
1125 | case schema::Type::ENUM: value.setEnum(0); break; |
1126 | case schema::Type::TEXT: value.adoptText(Orphan<Text>()); break; |
1127 | case schema::Type::DATA: value.adoptData(Orphan<Data>()); break; |
1128 | case schema::Type::LIST: value.initList(); break; |
1129 | case schema::Type::STRUCT: value.initStruct(); break; |
1130 | case schema::Type::INTERFACE: value.setInterface(); break; |
1131 | case schema::Type::ANY_POINTER: value.initAnyPointer(); break; |
1132 | } |
1133 | } |
1134 | |
1135 | loader.load(node, true); |
1136 | } |
1137 | |
1138 | bool canUpgradeToData(const schema::Type::Reader& type) { |
1139 | if (type.isText()) { |
1140 | return true; |
1141 | } else if (type.isList()) { |
1142 | switch (type.getList().getElementType().which()) { |
1143 | case schema::Type::INT8: |
1144 | case schema::Type::UINT8: |
1145 | return true; |
1146 | default: |
1147 | return false; |
1148 | } |
1149 | } else { |
1150 | return false; |
1151 | } |
1152 | } |
1153 | |
1154 | bool canUpgradeToAnyPointer(const schema::Type::Reader& type) { |
1155 | switch (type.which()) { |
1156 | case schema::Type::VOID: |
1157 | case schema::Type::BOOL: |
1158 | case schema::Type::INT8: |
1159 | case schema::Type::INT16: |
1160 | case schema::Type::INT32: |
1161 | case schema::Type::INT64: |
1162 | case schema::Type::UINT8: |
1163 | case schema::Type::UINT16: |
1164 | case schema::Type::UINT32: |
1165 | case schema::Type::UINT64: |
1166 | case schema::Type::FLOAT32: |
1167 | case schema::Type::FLOAT64: |
1168 | case schema::Type::ENUM: |
1169 | return false; |
1170 | |
1171 | case schema::Type::TEXT: |
1172 | case schema::Type::DATA: |
1173 | case schema::Type::LIST: |
1174 | case schema::Type::STRUCT: |
1175 | case schema::Type::INTERFACE: |
1176 | case schema::Type::ANY_POINTER: |
1177 | return true; |
1178 | } |
1179 | |
1180 | // Be lenient with unknown types. |
1181 | return true; |
1182 | } |
1183 | |
1184 | void checkDefaultCompatibility(const schema::Value::Reader& value, |
1185 | const schema::Value::Reader& replacement) { |
1186 | // Note that we test default compatibility only after testing type compatibility, and default |
1187 | // values have already been validated as matching their types, so this should pass. |
1188 | KJ_ASSERT(value.which() == replacement.which()) { |
1189 | compatibility = INCOMPATIBLE; |
1190 | return; |
1191 | } |
1192 | |
1193 | switch (value.which()) { |
1194 | #define HANDLE_TYPE(discrim, name) \ |
1195 | case schema::Value::discrim: \ |
1196 | VALIDATE_SCHEMA(value.get##name() == replacement.get##name(), "default value changed"); \ |
1197 | break; |
1198 | HANDLE_TYPE(VOID, Void); |
1199 | HANDLE_TYPE(BOOL, Bool); |
1200 | HANDLE_TYPE(INT8, Int8); |
1201 | HANDLE_TYPE(INT16, Int16); |
1202 | HANDLE_TYPE(INT32, Int32); |
1203 | HANDLE_TYPE(INT64, Int64); |
1204 | HANDLE_TYPE(UINT8, Uint8); |
1205 | HANDLE_TYPE(UINT16, Uint16); |
1206 | HANDLE_TYPE(UINT32, Uint32); |
1207 | HANDLE_TYPE(UINT64, Uint64); |
1208 | HANDLE_TYPE(FLOAT32, Float32); |
1209 | HANDLE_TYPE(FLOAT64, Float64); |
1210 | HANDLE_TYPE(ENUM, Enum); |
1211 | #undef HANDLE_TYPE |
1212 | |
1213 | case schema::Value::TEXT: |
1214 | case schema::Value::DATA: |
1215 | case schema::Value::LIST: |
1216 | case schema::Value::STRUCT: |
1217 | case schema::Value::INTERFACE: |
1218 | case schema::Value::ANY_POINTER: |
1219 | // It's not a big deal if default values for pointers change, and it would be difficult for |
1220 | // us to compare these defaults here, so just let it slide. |
1221 | break; |
1222 | } |
1223 | } |
1224 | }; |
1225 | |
1226 | // ======================================================================================= |
1227 | |
1228 | _::RawSchema* SchemaLoader::Impl::load(const schema::Node::Reader& reader, bool isPlaceholder) { |
1229 | // Make a copy of the node which can be used unchecked. |
1230 | kj::ArrayPtr<word> validated = makeUncheckedNodeEnforcingSizeRequirements(reader); |
1231 | |
1232 | // Validate the copy. |
1233 | Validator validator(*this); |
1234 | auto validatedReader = readMessageUnchecked<schema::Node>(validated.begin()); |
1235 | |
1236 | if (!validator.validate(validatedReader)) { |
1237 | // Not valid. Construct an empty schema of the same type and return that. |
1238 | return loadEmpty(validatedReader.getId(), |
1239 | validatedReader.getDisplayName(), |
1240 | validatedReader.which(), |
1241 | false); |
1242 | } |
1243 | |
1244 | // Check if we already have a schema for this ID. |
1245 | _::RawSchema* schema; |
1246 | bool shouldReplace; |
1247 | bool shouldClearInitializer; |
1248 | KJ_IF_MAYBE(match, schemas.find(validatedReader.getId())) { |
1249 | // Yes, check if it is compatible and figure out which schema is newer. |
1250 | |
1251 | schema = *match; |
1252 | |
1253 | // If the existing schema is a placeholder, but we're upgrading it to a non-placeholder, we |
1254 | // need to clear the initializer later. |
1255 | shouldClearInitializer = schema->lazyInitializer != nullptr && !isPlaceholder; |
1256 | |
1257 | auto existing = readMessageUnchecked<schema::Node>(schema->encodedNode); |
1258 | CompatibilityChecker checker(*this); |
1259 | |
1260 | // Prefer to replace the existing schema if the existing schema is a placeholder. Otherwise, |
1261 | // prefer to keep the existing schema. |
1262 | shouldReplace = checker.shouldReplace( |
1263 | existing, validatedReader, schema->lazyInitializer != nullptr); |
1264 | } else { |
1265 | // Nope, allocate a new RawSchema. |
1266 | schema = &arena.allocate<_::RawSchema>(); |
1267 | memset(&schema->defaultBrand, 0, sizeof(schema->defaultBrand)); |
1268 | schema->id = validatedReader.getId(); |
1269 | schema->canCastTo = nullptr; |
1270 | schema->defaultBrand.generic = schema; |
1271 | schema->lazyInitializer = isPlaceholder ? &initializer : nullptr; |
1272 | schema->defaultBrand.lazyInitializer = isPlaceholder ? &brandedInitializer : nullptr; |
1273 | shouldReplace = true; |
1274 | shouldClearInitializer = false; |
1275 | schemas.insert(validatedReader.getId(), schema); |
1276 | } |
1277 | |
1278 | if (shouldReplace) { |
1279 | // Initialize the RawSchema. |
1280 | schema->encodedNode = validated.begin(); |
1281 | schema->encodedSize = validated.size(); |
1282 | schema->dependencies = validator.makeDependencyArray(&schema->dependencyCount); |
1283 | schema->membersByName = validator.makeMemberInfoArray(&schema->memberCount); |
1284 | schema->membersByDiscriminant = validator.makeMembersByDiscriminantArray(); |
1285 | |
1286 | // Even though this schema isn't itself branded, it may have dependencies that are. So, we |
1287 | // need to set up the "dependencies" map under defaultBrand. |
1288 | auto deps = makeBrandedDependencies(schema, kj::ArrayPtr<const _::RawBrandedSchema::Scope>()); |
1289 | schema->defaultBrand.dependencies = deps.begin(); |
1290 | schema->defaultBrand.dependencyCount = deps.size(); |
1291 | } |
1292 | |
1293 | if (shouldClearInitializer) { |
1294 | // If this schema is not newly-allocated, it may already be in the wild, specifically in the |
1295 | // dependency list of other schemas. Once the initializer is null, it is live, so we must do |
1296 | // a release-store here. |
1297 | #if __GNUC__ |
1298 | __atomic_store_n(&schema->lazyInitializer, nullptr, __ATOMIC_RELEASE); |
1299 | __atomic_store_n(&schema->defaultBrand.lazyInitializer, nullptr, __ATOMIC_RELEASE); |
1300 | #elif _MSC_VER |
1301 | std::atomic_thread_fence(std::memory_order_release); |
1302 | *static_cast<_::RawSchema::Initializer const* volatile*>(&schema->lazyInitializer) = nullptr; |
1303 | *static_cast<_::RawBrandedSchema::Initializer const* volatile*>( |
1304 | &schema->defaultBrand.lazyInitializer) = nullptr; |
1305 | #else |
1306 | #error "Platform not supported" |
1307 | #endif |
1308 | } |
1309 | |
1310 | return schema; |
1311 | } |
1312 | |
1313 | _::RawSchema* SchemaLoader::Impl::loadNative(const _::RawSchema* nativeSchema) { |
1314 | _::RawSchema* schema; |
1315 | bool shouldReplace; |
1316 | bool shouldClearInitializer; |
1317 | KJ_IF_MAYBE(match, schemas.find(nativeSchema->id)) { |
1318 | schema = *match; |
1319 | if (schema->canCastTo != nullptr) { |
1320 | // Already loaded natively, or we're currently in the process of loading natively and there |
1321 | // was a dependency cycle. |
1322 | KJ_REQUIRE(schema->canCastTo == nativeSchema, |
1323 | "two different compiled-in type have the same type ID" , |
1324 | nativeSchema->id, |
1325 | readMessageUnchecked<schema::Node>(nativeSchema->encodedNode).getDisplayName(), |
1326 | readMessageUnchecked<schema::Node>(schema->canCastTo->encodedNode).getDisplayName()); |
1327 | return schema; |
1328 | } else { |
1329 | auto existing = readMessageUnchecked<schema::Node>(schema->encodedNode); |
1330 | auto native = readMessageUnchecked<schema::Node>(nativeSchema->encodedNode); |
1331 | CompatibilityChecker checker(*this); |
1332 | shouldReplace = checker.shouldReplace(existing, native, true); |
1333 | shouldClearInitializer = schema->lazyInitializer != nullptr; |
1334 | } |
1335 | } else { |
1336 | schema = &arena.allocate<_::RawSchema>(); |
1337 | memset(&schema->defaultBrand, 0, sizeof(schema->defaultBrand)); |
1338 | schema->defaultBrand.generic = schema; |
1339 | schema->lazyInitializer = nullptr; |
1340 | schema->defaultBrand.lazyInitializer = nullptr; |
1341 | shouldReplace = true; |
1342 | shouldClearInitializer = false; // already cleared above |
1343 | schemas.insert(nativeSchema->id, schema); |
1344 | } |
1345 | |
1346 | if (shouldReplace) { |
1347 | // Set the schema to a copy of the native schema, but make sure not to null out lazyInitializer |
1348 | // yet. |
1349 | _::RawSchema temp = *nativeSchema; |
1350 | temp.lazyInitializer = schema->lazyInitializer; |
1351 | *schema = temp; |
1352 | |
1353 | schema->defaultBrand.generic = schema; |
1354 | |
1355 | // Indicate that casting is safe. Note that it's important to set this before recursively |
1356 | // loading dependencies, so that cycles don't cause infinite loops! |
1357 | schema->canCastTo = nativeSchema; |
1358 | |
1359 | // We need to set the dependency list to point at other loader-owned RawSchemas. |
1360 | kj::ArrayPtr<const _::RawSchema*> dependencies = |
1361 | arena.allocateArray<const _::RawSchema*>(schema->dependencyCount); |
1362 | for (uint i = 0; i < nativeSchema->dependencyCount; i++) { |
1363 | dependencies[i] = loadNative(nativeSchema->dependencies[i]); |
1364 | } |
1365 | schema->dependencies = dependencies.begin(); |
1366 | |
1367 | // Also need to re-do the branded dependencies. |
1368 | auto deps = makeBrandedDependencies(schema, kj::ArrayPtr<const _::RawBrandedSchema::Scope>()); |
1369 | schema->defaultBrand.dependencies = deps.begin(); |
1370 | schema->defaultBrand.dependencyCount = deps.size(); |
1371 | |
1372 | // If there is a struct size requirement, we need to make sure that it is satisfied. |
1373 | KJ_IF_MAYBE(sizeReq, structSizeRequirements.find(nativeSchema->id)) { |
1374 | applyStructSizeRequirement(schema, sizeReq->dataWordCount, |
1375 | sizeReq->pointerCount); |
1376 | } |
1377 | } else { |
1378 | // The existing schema is newer. |
1379 | |
1380 | // Indicate that casting is safe. Note that it's important to set this before recursively |
1381 | // loading dependencies, so that cycles don't cause infinite loops! |
1382 | schema->canCastTo = nativeSchema; |
1383 | |
1384 | // Make sure the dependencies are loaded and compatible. |
1385 | for (uint i = 0; i < nativeSchema->dependencyCount; i++) { |
1386 | loadNative(nativeSchema->dependencies[i]); |
1387 | } |
1388 | } |
1389 | |
1390 | if (shouldClearInitializer) { |
1391 | // If this schema is not newly-allocated, it may already be in the wild, specifically in the |
1392 | // dependency list of other schemas. Once the initializer is null, it is live, so we must do |
1393 | // a release-store here. |
1394 | #if __GNUC__ |
1395 | __atomic_store_n(&schema->lazyInitializer, nullptr, __ATOMIC_RELEASE); |
1396 | __atomic_store_n(&schema->defaultBrand.lazyInitializer, nullptr, __ATOMIC_RELEASE); |
1397 | #elif _MSC_VER |
1398 | std::atomic_thread_fence(std::memory_order_release); |
1399 | *static_cast<_::RawSchema::Initializer const* volatile*>(&schema->lazyInitializer) = nullptr; |
1400 | *static_cast<_::RawBrandedSchema::Initializer const* volatile*>( |
1401 | &schema->defaultBrand.lazyInitializer) = nullptr; |
1402 | #else |
1403 | #error "Platform not supported" |
1404 | #endif |
1405 | } |
1406 | |
1407 | return schema; |
1408 | } |
1409 | |
1410 | _::RawSchema* SchemaLoader::Impl::loadEmpty( |
1411 | uint64_t id, kj::StringPtr name, schema::Node::Which kind, bool isPlaceholder) { |
1412 | word scratch[32]; |
1413 | memset(scratch, 0, sizeof(scratch)); |
1414 | MallocMessageBuilder builder(scratch); |
1415 | auto node = builder.initRoot<schema::Node>(); |
1416 | node.setId(id); |
1417 | node.setDisplayName(name); |
1418 | switch (kind) { |
1419 | case schema::Node::STRUCT: node.initStruct(); break; |
1420 | case schema::Node::ENUM: node.initEnum(); break; |
1421 | case schema::Node::INTERFACE: node.initInterface(); break; |
1422 | |
1423 | case schema::Node::FILE: |
1424 | case schema::Node::CONST: |
1425 | case schema::Node::ANNOTATION: |
1426 | KJ_FAIL_REQUIRE("Not a type." ); |
1427 | break; |
1428 | } |
1429 | |
1430 | return load(node, isPlaceholder); |
1431 | } |
1432 | |
1433 | const _::RawBrandedSchema* SchemaLoader::Impl::makeBranded( |
1434 | const _::RawSchema* schema, schema::Brand::Reader proto, |
1435 | kj::Maybe<kj::ArrayPtr<const _::RawBrandedSchema::Scope>> clientBrand) { |
1436 | kj::StringPtr scopeName = |
1437 | readMessageUnchecked<schema::Node>(schema->encodedNode).getDisplayName(); |
1438 | |
1439 | auto srcScopes = proto.getScopes(); |
1440 | |
1441 | KJ_STACK_ARRAY(_::RawBrandedSchema::Scope, dstScopes, srcScopes.size(), 16, 32); |
1442 | memset(dstScopes.begin(), 0, dstScopes.size() * sizeof(dstScopes[0])); |
1443 | |
1444 | uint dstScopeCount = 0; |
1445 | for (auto srcScope: srcScopes) { |
1446 | switch (srcScope.which()) { |
1447 | case schema::Brand::Scope::BIND: { |
1448 | auto srcBindings = srcScope.getBind(); |
1449 | KJ_STACK_ARRAY(_::RawBrandedSchema::Binding, dstBindings, srcBindings.size(), 16, 32); |
1450 | memset(dstBindings.begin(), 0, dstBindings.size() * sizeof(dstBindings[0])); |
1451 | |
1452 | for (auto j: kj::indices(srcBindings)) { |
1453 | auto srcBinding = srcBindings[j]; |
1454 | auto& dstBinding = dstBindings[j]; |
1455 | |
1456 | memset(&dstBinding, 0, sizeof(dstBinding)); |
1457 | dstBinding.which = schema::Type::ANY_POINTER; |
1458 | |
1459 | switch (srcBinding.which()) { |
1460 | case schema::Brand::Binding::UNBOUND: |
1461 | break; |
1462 | case schema::Brand::Binding::TYPE: { |
1463 | makeDep(dstBinding, srcBinding.getType(), scopeName, clientBrand); |
1464 | break; |
1465 | } |
1466 | } |
1467 | } |
1468 | |
1469 | auto& dstScope = dstScopes[dstScopeCount++]; |
1470 | dstScope.typeId = srcScope.getScopeId(); |
1471 | dstScope.bindingCount = dstBindings.size(); |
1472 | dstScope.bindings = copyDeduped(dstBindings).begin(); |
1473 | break; |
1474 | } |
1475 | case schema::Brand::Scope::INHERIT: { |
1476 | // Inherit the whole scope from the client -- or if the client doesn't have it, at least |
1477 | // include an empty dstScope in the list just to show that this scope was specified as |
1478 | // inherited, as opposed to being unspecified (which would be treated as all AnyPointer). |
1479 | auto& dstScope = dstScopes[dstScopeCount++]; |
1480 | dstScope.typeId = srcScope.getScopeId(); |
1481 | |
1482 | KJ_IF_MAYBE(b, clientBrand) { |
1483 | for (auto& clientScope: *b) { |
1484 | if (clientScope.typeId == dstScope.typeId) { |
1485 | // Overwrite the whole thing. |
1486 | dstScope = clientScope; |
1487 | break; |
1488 | } |
1489 | } |
1490 | } else { |
1491 | dstScope.isUnbound = true; |
1492 | } |
1493 | break; |
1494 | } |
1495 | } |
1496 | } |
1497 | |
1498 | dstScopes = dstScopes.slice(0, dstScopeCount); |
1499 | |
1500 | std::sort(dstScopes.begin(), dstScopes.end(), |
1501 | [](const _::RawBrandedSchema::Scope& a, const _::RawBrandedSchema::Scope& b) { |
1502 | return a.typeId < b.typeId; |
1503 | }); |
1504 | |
1505 | return makeBranded(schema, copyDeduped(dstScopes)); |
1506 | } |
1507 | |
1508 | const _::RawBrandedSchema* SchemaLoader::Impl::makeBranded( |
1509 | const _::RawSchema* schema, kj::ArrayPtr<const _::RawBrandedSchema::Scope> bindings) { |
1510 | // Note that even if `bindings` is empty, we never want to return defaultBrand here because |
1511 | // defaultBrand has special status. Normally, the lack of bindings means all parameters are |
1512 | // "unspecified", which means their bindings are unknown and should be treated as AnyPointer. |
1513 | // But defaultBrand represents a special case where all parameters are still parameters -- they |
1514 | // haven't been bound in the first place. defaultBrand is used to represent the unbranded generic |
1515 | // type, while a no-binding brand is equivalent to binding all parameters to AnyPointer. |
1516 | |
1517 | if (bindings.size() == 0) { |
1518 | return &schema->defaultBrand; |
1519 | } |
1520 | |
1521 | SchemaBindingsPair key { schema, bindings.begin() }; |
1522 | KJ_IF_MAYBE(existing, brands.find(key)) { |
1523 | return *existing; |
1524 | } else { |
1525 | auto& brand = arena.allocate<_::RawBrandedSchema>(); |
1526 | memset(&brand, 0, sizeof(brand)); |
1527 | brands.insert(key, &brand); |
1528 | |
1529 | brand.generic = schema; |
1530 | brand.scopes = bindings.begin(); |
1531 | brand.scopeCount = bindings.size(); |
1532 | brand.lazyInitializer = &brandedInitializer; |
1533 | return &brand; |
1534 | } |
1535 | } |
1536 | |
1537 | kj::ArrayPtr<const _::RawBrandedSchema::Dependency> |
1538 | SchemaLoader::Impl::makeBrandedDependencies( |
1539 | const _::RawSchema* schema, |
1540 | kj::Maybe<kj::ArrayPtr<const _::RawBrandedSchema::Scope>> bindings) { |
1541 | kj::StringPtr scopeName = |
1542 | readMessageUnchecked<schema::Node>(schema->encodedNode).getDisplayName(); |
1543 | |
1544 | kj::Vector<_::RawBrandedSchema::Dependency> deps; |
1545 | |
1546 | schema::Node::Reader node = readMessageUnchecked<schema::Node>(schema->encodedNode); |
1547 | |
1548 | #define ADD_ENTRY(kind, index, make) \ |
1549 | if (const _::RawBrandedSchema* dep = make) { \ |
1550 | auto& slot = deps.add(); \ |
1551 | memset(&slot, 0, sizeof(slot)); \ |
1552 | slot.location = _::RawBrandedSchema::makeDepLocation( \ |
1553 | _::RawBrandedSchema::DepKind::kind, index); \ |
1554 | slot.schema = dep; \ |
1555 | } |
1556 | |
1557 | switch (node.which()) { |
1558 | case schema::Node::FILE: |
1559 | case schema::Node::ENUM: |
1560 | case schema::Node::ANNOTATION: |
1561 | break; |
1562 | |
1563 | case schema::Node::CONST: |
1564 | ADD_ENTRY(CONST_TYPE, 0, makeDepSchema( |
1565 | node.getConst().getType(), scopeName, bindings)); |
1566 | break; |
1567 | |
1568 | case schema::Node::STRUCT: { |
1569 | auto fields = node.getStruct().getFields(); |
1570 | for (auto i: kj::indices(fields)) { |
1571 | auto field = fields[i]; |
1572 | switch (field.which()) { |
1573 | case schema::Field::SLOT: |
1574 | ADD_ENTRY(FIELD, i, makeDepSchema( |
1575 | field.getSlot().getType(), scopeName, bindings)) |
1576 | break; |
1577 | case schema::Field::GROUP: { |
1578 | const _::RawSchema* group = loadEmpty( |
1579 | field.getGroup().getTypeId(), |
1580 | "(unknown group type)" , schema::Node::STRUCT, true); |
1581 | KJ_IF_MAYBE(b, bindings) { |
1582 | ADD_ENTRY(FIELD, i, makeBranded(group, *b)); |
1583 | } else { |
1584 | ADD_ENTRY(FIELD, i, getUnbound(group)); |
1585 | } |
1586 | break; |
1587 | } |
1588 | } |
1589 | } |
1590 | break; |
1591 | } |
1592 | |
1593 | case schema::Node::INTERFACE: { |
1594 | auto interface = node.getInterface(); |
1595 | { |
1596 | auto superclasses = interface.getSuperclasses(); |
1597 | for (auto i: kj::indices(superclasses)) { |
1598 | auto superclass = superclasses[i]; |
1599 | ADD_ENTRY(SUPERCLASS, i, makeDepSchema( |
1600 | superclass.getId(), schema::Type::INTERFACE, schema::Node::INTERFACE, |
1601 | superclass.getBrand(), scopeName, bindings)) |
1602 | } |
1603 | } |
1604 | { |
1605 | auto methods = interface.getMethods(); |
1606 | for (auto i: kj::indices(methods)) { |
1607 | auto method = methods[i]; |
1608 | ADD_ENTRY(METHOD_PARAMS, i, makeDepSchema( |
1609 | method.getParamStructType(), schema::Type::STRUCT, schema::Node::STRUCT, |
1610 | method.getParamBrand(), scopeName, bindings)) |
1611 | ADD_ENTRY(METHOD_RESULTS, i, makeDepSchema( |
1612 | method.getResultStructType(), schema::Type::STRUCT, schema::Node::STRUCT, |
1613 | method.getResultBrand(), scopeName, bindings)) |
1614 | } |
1615 | } |
1616 | break; |
1617 | } |
1618 | } |
1619 | |
1620 | #undef ADD_ENTRY |
1621 | |
1622 | std::sort(deps.begin(), deps.end(), |
1623 | [](const _::RawBrandedSchema::Dependency& a, const _::RawBrandedSchema::Dependency& b) { |
1624 | return a.location < b.location; |
1625 | }); |
1626 | |
1627 | return copyDeduped(deps.asPtr()); |
1628 | } |
1629 | |
1630 | void SchemaLoader::Impl::makeDep(_::RawBrandedSchema::Binding& result, |
1631 | schema::Type::Reader type, kj::StringPtr scopeName, |
1632 | kj::Maybe<kj::ArrayPtr<const _::RawBrandedSchema::Scope>> brandBindings) { |
1633 | switch (type.which()) { |
1634 | case schema::Type::VOID: |
1635 | case schema::Type::BOOL: |
1636 | case schema::Type::INT8: |
1637 | case schema::Type::INT16: |
1638 | case schema::Type::INT32: |
1639 | case schema::Type::INT64: |
1640 | case schema::Type::UINT8: |
1641 | case schema::Type::UINT16: |
1642 | case schema::Type::UINT32: |
1643 | case schema::Type::UINT64: |
1644 | case schema::Type::FLOAT32: |
1645 | case schema::Type::FLOAT64: |
1646 | case schema::Type::TEXT: |
1647 | case schema::Type::DATA: |
1648 | result.which = static_cast<uint8_t>(type.which()); |
1649 | return; |
1650 | |
1651 | case schema::Type::STRUCT: { |
1652 | auto structType = type.getStruct(); |
1653 | makeDep(result, structType.getTypeId(), schema::Type::STRUCT, schema::Node::STRUCT, |
1654 | structType.getBrand(), scopeName, brandBindings); |
1655 | return; |
1656 | } |
1657 | case schema::Type::ENUM: { |
1658 | auto enumType = type.getEnum(); |
1659 | makeDep(result, enumType.getTypeId(), schema::Type::ENUM, schema::Node::ENUM, |
1660 | enumType.getBrand(), scopeName, brandBindings); |
1661 | return; |
1662 | } |
1663 | case schema::Type::INTERFACE: { |
1664 | auto interfaceType = type.getInterface(); |
1665 | makeDep(result, interfaceType.getTypeId(), schema::Type::INTERFACE, schema::Node::INTERFACE, |
1666 | interfaceType.getBrand(), scopeName, brandBindings); |
1667 | return; |
1668 | } |
1669 | |
1670 | case schema::Type::LIST: { |
1671 | makeDep(result, type.getList().getElementType(), scopeName, brandBindings); |
1672 | ++result.listDepth; |
1673 | return; |
1674 | } |
1675 | |
1676 | case schema::Type::ANY_POINTER: { |
1677 | result.which = static_cast<uint8_t>(schema::Type::ANY_POINTER); |
1678 | auto anyPointer = type.getAnyPointer(); |
1679 | switch (anyPointer.which()) { |
1680 | case schema::Type::AnyPointer::UNCONSTRAINED: |
1681 | return; |
1682 | case schema::Type::AnyPointer::PARAMETER: { |
1683 | auto param = anyPointer.getParameter(); |
1684 | uint64_t id = param.getScopeId(); |
1685 | uint16_t index = param.getParameterIndex(); |
1686 | |
1687 | KJ_IF_MAYBE(b, brandBindings) { |
1688 | // TODO(perf): We could binary search here, but... bleh. |
1689 | for (auto& scope: *b) { |
1690 | if (scope.typeId == id) { |
1691 | if (scope.isUnbound) { |
1692 | // Unbound brand parameter. |
1693 | result.scopeId = id; |
1694 | result.paramIndex = index; |
1695 | return; |
1696 | } else if (index >= scope.bindingCount) { |
1697 | // Binding index out-of-range. Treat as AnyPointer. This is important to allow |
1698 | // new type parameters to be added to existing types without breaking dependent |
1699 | // schemas. |
1700 | return; |
1701 | } else { |
1702 | result = scope.bindings[index]; |
1703 | return; |
1704 | } |
1705 | } |
1706 | } |
1707 | return; |
1708 | } else { |
1709 | // Unbound brand parameter. |
1710 | result.scopeId = id; |
1711 | result.paramIndex = index; |
1712 | return; |
1713 | } |
1714 | } |
1715 | case schema::Type::AnyPointer::IMPLICIT_METHOD_PARAMETER: |
1716 | result.isImplicitParameter = true; |
1717 | result.paramIndex = anyPointer.getImplicitMethodParameter().getParameterIndex(); |
1718 | return; |
1719 | } |
1720 | KJ_UNREACHABLE; |
1721 | } |
1722 | } |
1723 | |
1724 | KJ_UNREACHABLE; |
1725 | } |
1726 | |
1727 | void SchemaLoader::Impl::makeDep(_::RawBrandedSchema::Binding& result, |
1728 | uint64_t typeId, schema::Type::Which whichType, schema::Node::Which expectedKind, |
1729 | schema::Brand::Reader brand, kj::StringPtr scopeName, |
1730 | kj::Maybe<kj::ArrayPtr<const _::RawBrandedSchema::Scope>> brandBindings) { |
1731 | const _::RawSchema* schema = loadEmpty(typeId, |
1732 | kj::str("(unknown type; seen as dependency of " , scopeName, ")" ), |
1733 | expectedKind, true); |
1734 | result.which = static_cast<uint8_t>(whichType); |
1735 | result.schema = makeBranded(schema, brand, brandBindings); |
1736 | } |
1737 | |
1738 | const _::RawBrandedSchema* SchemaLoader::Impl::makeDepSchema( |
1739 | schema::Type::Reader type, kj::StringPtr scopeName, |
1740 | kj::Maybe<kj::ArrayPtr<const _::RawBrandedSchema::Scope>> brandBindings) { |
1741 | _::RawBrandedSchema::Binding binding; |
1742 | memset(&binding, 0, sizeof(binding)); |
1743 | makeDep(binding, type, scopeName, brandBindings); |
1744 | return binding.schema; |
1745 | } |
1746 | |
1747 | const _::RawBrandedSchema* SchemaLoader::Impl::makeDepSchema( |
1748 | uint64_t typeId, schema::Type::Which whichType, schema::Node::Which expectedKind, |
1749 | schema::Brand::Reader brand, kj::StringPtr scopeName, |
1750 | kj::Maybe<kj::ArrayPtr<const _::RawBrandedSchema::Scope>> brandBindings) { |
1751 | _::RawBrandedSchema::Binding binding; |
1752 | memset(&binding, 0, sizeof(binding)); |
1753 | makeDep(binding, typeId, whichType, expectedKind, brand, scopeName, brandBindings); |
1754 | return binding.schema; |
1755 | } |
1756 | |
1757 | template <typename T> |
1758 | kj::ArrayPtr<const T> SchemaLoader::Impl::copyDeduped(kj::ArrayPtr<const T> values) { |
1759 | if (values.size() == 0) { |
1760 | return kj::arrayPtr(kj::implicitCast<const T*>(nullptr), 0); |
1761 | } |
1762 | |
1763 | auto bytes = values.asBytes(); |
1764 | |
1765 | KJ_IF_MAYBE(dupe, dedupTable.find(bytes)) { |
1766 | return kj::arrayPtr(reinterpret_cast<const T*>(dupe->begin()), values.size()); |
1767 | } |
1768 | |
1769 | // Need to make a new copy. |
1770 | auto copy = arena.allocateArray<T>(values.size()); |
1771 | memcpy(copy.begin(), values.begin(), values.size() * sizeof(T)); |
1772 | |
1773 | dedupTable.insert(copy.asBytes()); |
1774 | |
1775 | return copy; |
1776 | } |
1777 | |
1778 | template <typename T> |
1779 | kj::ArrayPtr<const T> SchemaLoader::Impl::copyDeduped(kj::ArrayPtr<T> values) { |
1780 | return copyDeduped(kj::ArrayPtr<const T>(values)); |
1781 | } |
1782 | |
1783 | SchemaLoader::Impl::TryGetResult SchemaLoader::Impl::tryGet(uint64_t typeId) const { |
1784 | KJ_IF_MAYBE(schema, schemas.find(typeId)) { |
1785 | return {*schema, initializer.getCallback()}; |
1786 | } else { |
1787 | return {nullptr, initializer.getCallback()}; |
1788 | } |
1789 | } |
1790 | |
1791 | const _::RawBrandedSchema* SchemaLoader::Impl::getUnbound(const _::RawSchema* schema) { |
1792 | if (!readMessageUnchecked<schema::Node>(schema->encodedNode).getIsGeneric()) { |
1793 | // Not a generic type, so just return the default brand. |
1794 | return &schema->defaultBrand; |
1795 | } |
1796 | |
1797 | KJ_IF_MAYBE(existing, unboundBrands.find(schema)) { |
1798 | return *existing; |
1799 | } else { |
1800 | auto slot = &arena.allocate<_::RawBrandedSchema>(); |
1801 | memset(slot, 0, sizeof(*slot)); |
1802 | slot->generic = schema; |
1803 | auto deps = makeBrandedDependencies(schema, nullptr); |
1804 | slot->dependencies = deps.begin(); |
1805 | slot->dependencyCount = deps.size(); |
1806 | unboundBrands.insert(schema, slot); |
1807 | return slot; |
1808 | } |
1809 | } |
1810 | |
1811 | kj::Array<Schema> SchemaLoader::Impl::getAllLoaded() const { |
1812 | size_t count = 0; |
1813 | for (auto& schema: schemas) { |
1814 | if (schema.value->lazyInitializer == nullptr) ++count; |
1815 | } |
1816 | |
1817 | kj::Array<Schema> result = kj::heapArray<Schema>(count); |
1818 | size_t i = 0; |
1819 | for (auto& schema: schemas) { |
1820 | if (schema.value->lazyInitializer == nullptr) { |
1821 | result[i++] = Schema(&schema.value->defaultBrand); |
1822 | } |
1823 | } |
1824 | return result; |
1825 | } |
1826 | |
1827 | void SchemaLoader::Impl::requireStructSize(uint64_t id, uint dataWordCount, uint pointerCount) { |
1828 | structSizeRequirements.upsert(id, { uint16_t(dataWordCount), uint16_t(pointerCount) }, |
1829 | [&](RequiredSize& existingValue, RequiredSize&& newValue) { |
1830 | existingValue.dataWordCount = kj::max(existingValue.dataWordCount, newValue.dataWordCount); |
1831 | existingValue.pointerCount = kj::max(existingValue.pointerCount, newValue.pointerCount); |
1832 | }); |
1833 | |
1834 | KJ_IF_MAYBE(schema, schemas.find(id)) { |
1835 | applyStructSizeRequirement(*schema, dataWordCount, pointerCount); |
1836 | } |
1837 | } |
1838 | |
1839 | kj::ArrayPtr<word> SchemaLoader::Impl::makeUncheckedNode(schema::Node::Reader node) { |
1840 | size_t size = node.totalSize().wordCount + 1; |
1841 | kj::ArrayPtr<word> result = arena.allocateArray<word>(size); |
1842 | memset(result.begin(), 0, size * sizeof(word)); |
1843 | copyToUnchecked(node, result); |
1844 | return result; |
1845 | } |
1846 | |
1847 | kj::ArrayPtr<word> SchemaLoader::Impl::makeUncheckedNodeEnforcingSizeRequirements( |
1848 | schema::Node::Reader node) { |
1849 | if (node.isStruct()) { |
1850 | KJ_IF_MAYBE(requirement, structSizeRequirements.find(node.getId())) { |
1851 | auto structNode = node.getStruct(); |
1852 | if (structNode.getDataWordCount() < requirement->dataWordCount || |
1853 | structNode.getPointerCount() < requirement->pointerCount) { |
1854 | return rewriteStructNodeWithSizes(node, requirement->dataWordCount, |
1855 | requirement->pointerCount); |
1856 | } |
1857 | } |
1858 | } |
1859 | |
1860 | return makeUncheckedNode(node); |
1861 | } |
1862 | |
1863 | kj::ArrayPtr<word> SchemaLoader::Impl::rewriteStructNodeWithSizes( |
1864 | schema::Node::Reader node, uint dataWordCount, uint pointerCount) { |
1865 | MallocMessageBuilder builder; |
1866 | builder.setRoot(node); |
1867 | |
1868 | auto root = builder.getRoot<schema::Node>(); |
1869 | auto newStruct = root.getStruct(); |
1870 | newStruct.setDataWordCount(kj::max(newStruct.getDataWordCount(), dataWordCount)); |
1871 | newStruct.setPointerCount(kj::max(newStruct.getPointerCount(), pointerCount)); |
1872 | |
1873 | return makeUncheckedNode(root); |
1874 | } |
1875 | |
1876 | void SchemaLoader::Impl::applyStructSizeRequirement( |
1877 | _::RawSchema* raw, uint dataWordCount, uint pointerCount) { |
1878 | auto node = readMessageUnchecked<schema::Node>(raw->encodedNode); |
1879 | |
1880 | auto structNode = node.getStruct(); |
1881 | if (structNode.getDataWordCount() < dataWordCount || |
1882 | structNode.getPointerCount() < pointerCount) { |
1883 | // Sizes need to be increased. Must rewrite. |
1884 | kj::ArrayPtr<word> words = rewriteStructNodeWithSizes(node, dataWordCount, pointerCount); |
1885 | |
1886 | // We don't need to re-validate the node because we know this change could not possibly have |
1887 | // invalidated it. Just remake the unchecked message. |
1888 | raw->encodedNode = words.begin(); |
1889 | raw->encodedSize = words.size(); |
1890 | } |
1891 | } |
1892 | |
1893 | void SchemaLoader::InitializerImpl::init(const _::RawSchema* schema) const { |
1894 | KJ_IF_MAYBE(c, callback) { |
1895 | c->load(loader, schema->id); |
1896 | } |
1897 | |
1898 | if (schema->lazyInitializer != nullptr) { |
1899 | // The callback declined to load a schema. We need to disable the initializer so that it |
1900 | // doesn't get invoked again later, as we can no longer modify this schema once it is in use. |
1901 | |
1902 | // Lock the loader for read to make sure no one is concurrently loading a replacement for this |
1903 | // schema node. |
1904 | auto lock = loader.impl.lockShared(); |
1905 | |
1906 | // Get the mutable version of the schema. |
1907 | _::RawSchema* mutableSchema = lock->get()->tryGet(schema->id).schema; |
1908 | KJ_ASSERT(mutableSchema == schema, |
1909 | "A schema not belonging to this loader used its initializer." ); |
1910 | |
1911 | // Disable the initializer. |
1912 | #if __GNUC__ |
1913 | __atomic_store_n(&mutableSchema->lazyInitializer, nullptr, __ATOMIC_RELEASE); |
1914 | __atomic_store_n(&mutableSchema->defaultBrand.lazyInitializer, nullptr, __ATOMIC_RELEASE); |
1915 | #elif _MSC_VER |
1916 | std::atomic_thread_fence(std::memory_order_release); |
1917 | *static_cast<_::RawSchema::Initializer const* volatile*>( |
1918 | &mutableSchema->lazyInitializer) = nullptr; |
1919 | *static_cast<_::RawBrandedSchema::Initializer const* volatile*>( |
1920 | &mutableSchema->defaultBrand.lazyInitializer) = nullptr; |
1921 | #else |
1922 | #error "Platform not supported" |
1923 | #endif |
1924 | } |
1925 | } |
1926 | |
1927 | void SchemaLoader::BrandedInitializerImpl::init(const _::RawBrandedSchema* schema) const { |
1928 | schema->generic->ensureInitialized(); |
1929 | |
1930 | auto lock = loader.impl.lockExclusive(); |
1931 | |
1932 | if (schema->lazyInitializer == nullptr) { |
1933 | // Never mind, someone beat us to it. |
1934 | return; |
1935 | } |
1936 | |
1937 | // Get the mutable version. |
1938 | _::RawBrandedSchema* mutableSchema = KJ_ASSERT_NONNULL( |
1939 | lock->get()->brands.find(SchemaBindingsPair { schema->generic, schema->scopes })); |
1940 | KJ_ASSERT(mutableSchema == schema); |
1941 | |
1942 | // Construct its dependency map. |
1943 | auto deps = lock->get()->makeBrandedDependencies(mutableSchema->generic, |
1944 | kj::arrayPtr(mutableSchema->scopes, mutableSchema->scopeCount)); |
1945 | mutableSchema->dependencies = deps.begin(); |
1946 | mutableSchema->dependencyCount = deps.size(); |
1947 | |
1948 | // It's initialized now, so disable the initializer. |
1949 | #if __GNUC__ |
1950 | __atomic_store_n(&mutableSchema->lazyInitializer, nullptr, __ATOMIC_RELEASE); |
1951 | #elif _MSC_VER |
1952 | std::atomic_thread_fence(std::memory_order_release); |
1953 | *static_cast<_::RawBrandedSchema::Initializer const* volatile*>( |
1954 | &mutableSchema->lazyInitializer) = nullptr; |
1955 | #else |
1956 | #error "Platform not supported" |
1957 | #endif |
1958 | } |
1959 | |
1960 | // ======================================================================================= |
1961 | |
1962 | SchemaLoader::SchemaLoader(): impl(kj::heap<Impl>(*this)) {} |
1963 | SchemaLoader::SchemaLoader(const LazyLoadCallback& callback) |
1964 | : impl(kj::heap<Impl>(*this, callback)) {} |
1965 | SchemaLoader::~SchemaLoader() noexcept(false) {} |
1966 | |
1967 | Schema SchemaLoader::get(uint64_t id, schema::Brand::Reader brand, Schema scope) const { |
1968 | KJ_IF_MAYBE(result, tryGet(id, brand, scope)) { |
1969 | return *result; |
1970 | } else { |
1971 | KJ_FAIL_REQUIRE("no schema node loaded for id" , kj::hex(id)); |
1972 | } |
1973 | } |
1974 | |
1975 | kj::Maybe<Schema> SchemaLoader::tryGet( |
1976 | uint64_t id, schema::Brand::Reader brand, Schema scope) const { |
1977 | auto getResult = impl.lockShared()->get()->tryGet(id); |
1978 | if (getResult.schema == nullptr || getResult.schema->lazyInitializer != nullptr) { |
1979 | // This schema couldn't be found or has yet to be lazily loaded. If we have a lazy loader |
1980 | // callback, invoke it now to try to get it to load this schema. |
1981 | KJ_IF_MAYBE(c, getResult.callback) { |
1982 | c->load(*this, id); |
1983 | } |
1984 | getResult = impl.lockShared()->get()->tryGet(id); |
1985 | } |
1986 | if (getResult.schema != nullptr && getResult.schema->lazyInitializer == nullptr) { |
1987 | if (brand.getScopes().size() > 0) { |
1988 | auto brandedSchema = impl.lockExclusive()->get()->makeBranded( |
1989 | getResult.schema, brand, kj::arrayPtr(scope.raw->scopes, scope.raw->scopeCount)); |
1990 | brandedSchema->ensureInitialized(); |
1991 | return Schema(brandedSchema); |
1992 | } else { |
1993 | return Schema(&getResult.schema->defaultBrand); |
1994 | } |
1995 | } else { |
1996 | return nullptr; |
1997 | } |
1998 | } |
1999 | |
2000 | Schema SchemaLoader::getUnbound(uint64_t id) const { |
2001 | auto schema = get(id); |
2002 | return Schema(impl.lockExclusive()->get()->getUnbound(schema.raw->generic)); |
2003 | } |
2004 | |
2005 | Type SchemaLoader::getType(schema::Type::Reader proto, Schema scope) const { |
2006 | switch (proto.which()) { |
2007 | case schema::Type::VOID: |
2008 | case schema::Type::BOOL: |
2009 | case schema::Type::INT8: |
2010 | case schema::Type::INT16: |
2011 | case schema::Type::INT32: |
2012 | case schema::Type::INT64: |
2013 | case schema::Type::UINT8: |
2014 | case schema::Type::UINT16: |
2015 | case schema::Type::UINT32: |
2016 | case schema::Type::UINT64: |
2017 | case schema::Type::FLOAT32: |
2018 | case schema::Type::FLOAT64: |
2019 | case schema::Type::TEXT: |
2020 | case schema::Type::DATA: |
2021 | return proto.which(); |
2022 | |
2023 | case schema::Type::STRUCT: { |
2024 | auto structType = proto.getStruct(); |
2025 | return get(structType.getTypeId(), structType.getBrand(), scope).asStruct(); |
2026 | } |
2027 | |
2028 | case schema::Type::ENUM: { |
2029 | auto enumType = proto.getEnum(); |
2030 | return get(enumType.getTypeId(), enumType.getBrand(), scope).asEnum(); |
2031 | } |
2032 | |
2033 | case schema::Type::INTERFACE: { |
2034 | auto interfaceType = proto.getInterface(); |
2035 | return get(interfaceType.getTypeId(), interfaceType.getBrand(), scope) |
2036 | .asInterface(); |
2037 | } |
2038 | |
2039 | case schema::Type::LIST: |
2040 | return ListSchema::of(getType(proto.getList().getElementType(), scope)); |
2041 | |
2042 | case schema::Type::ANY_POINTER: { |
2043 | auto anyPointer = proto.getAnyPointer(); |
2044 | switch (anyPointer.which()) { |
2045 | case schema::Type::AnyPointer::UNCONSTRAINED: |
2046 | return schema::Type::ANY_POINTER; |
2047 | case schema::Type::AnyPointer::PARAMETER: { |
2048 | auto param = anyPointer.getParameter(); |
2049 | return scope.getBrandBinding(param.getScopeId(), param.getParameterIndex()); |
2050 | } |
2051 | case schema::Type::AnyPointer::IMPLICIT_METHOD_PARAMETER: |
2052 | // We don't support binding implicit method params here. |
2053 | return schema::Type::ANY_POINTER; |
2054 | } |
2055 | |
2056 | KJ_UNREACHABLE; |
2057 | } |
2058 | } |
2059 | |
2060 | KJ_UNREACHABLE; |
2061 | } |
2062 | |
2063 | Schema SchemaLoader::load(const schema::Node::Reader& reader) { |
2064 | return Schema(&impl.lockExclusive()->get()->load(reader, false)->defaultBrand); |
2065 | } |
2066 | |
2067 | Schema SchemaLoader::loadOnce(const schema::Node::Reader& reader) const { |
2068 | auto locked = impl.lockExclusive(); |
2069 | auto getResult = locked->get()->tryGet(reader.getId()); |
2070 | if (getResult.schema == nullptr || getResult.schema->lazyInitializer != nullptr) { |
2071 | // Doesn't exist yet, or the existing schema is a placeholder and therefore has not yet been |
2072 | // seen publicly. Go ahead and load the incoming reader. |
2073 | return Schema(&locked->get()->load(reader, false)->defaultBrand); |
2074 | } else { |
2075 | return Schema(&getResult.schema->defaultBrand); |
2076 | } |
2077 | } |
2078 | |
2079 | kj::Array<Schema> SchemaLoader::getAllLoaded() const { |
2080 | return impl.lockShared()->get()->getAllLoaded(); |
2081 | } |
2082 | |
2083 | void SchemaLoader::loadNative(const _::RawSchema* nativeSchema) { |
2084 | impl.lockExclusive()->get()->loadNative(nativeSchema); |
2085 | } |
2086 | |
2087 | } // namespace capnp |
2088 | |