// Copyright (c) 2013-2014 Sandstorm Development Group, Inc. and contributors
// Licensed under the MIT License:
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.

#define CAPNP_PRIVATE
#include "arena.h"
#include "message.h"
#include <kj/debug.h>
#include <kj/refcount.h>
#include <vector>
#include <string.h>
#include <stdio.h>
#include <stdlib.h>

#if !CAPNP_LITE
#include "capability.h"
#endif // !CAPNP_LITE

namespace capnp {
namespace _ { // private

Arena::~Arena() noexcept(false) {}

void ReadLimiter::unread(WordCount64 amount) {
  // Be careful not to overflow here. Since ReadLimiter has no thread-safety, it's possible that
  // the limit value was not updated correctly for one or more reads, and therefore unread() could
  // overflow it even if it is only unreading words that were actually read.
  uint64_t oldValue = limit;
  uint64_t newValue = oldValue + unbound(amount / WORDS);
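  // If the addition wrapped around, leave the limit unchanged. Under-counting the limit only
  // means the reader may hit the traversal limit a little early, which is the safe direction to
  // err in.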
  if (newValue > oldValue) {
    limit = newValue;
  }
}

void SegmentReader::abortCheckObjectFault() {
  KJ_LOG(FATAL, "checkObject()'s parameter is not in-range; this would segfault in opt mode",
      "this is a serious bug in Cap'n Proto; please notify security@sandstorm.io");
  abort();
}

void SegmentBuilder::throwNotWritable() {
  KJ_FAIL_REQUIRE(
      "Tried to form a Builder to an external data segment referenced by the MessageBuilder. "
      "When you use Orphanage::reference*(), you are not allowed to obtain Builders to the "
      "referenced data, only Readers, because that data is const.");
}

// =======================================================================================

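// Bounds-checks a raw segment size, converting it to a SegmentWordCount or failing if it cannot
// be represented in SEGMENT_WORD_COUNT_BITS bits.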
static SegmentWordCount verifySegmentSize(size_t size) {
  auto gsize = bounded(size) * WORDS;
  return assertMaxBits<SEGMENT_WORD_COUNT_BITS>(gsize, [&]() {
    KJ_FAIL_REQUIRE("segment is too large", size);
  });
}

inline ReaderArena::ReaderArena(MessageReader* message, const word* firstSegment,
                                SegmentWordCount firstSegmentSize)
    : message(message),
      readLimiter(bounded(message->getOptions().traversalLimitInWords) * WORDS),
      segment0(this, SegmentId(0), firstSegment, firstSegmentSize, &readLimiter) {}

inline ReaderArena::ReaderArena(MessageReader* message, kj::ArrayPtr<const word> firstSegment)
    : ReaderArena(message, firstSegment.begin(), verifySegmentSize(firstSegment.size())) {}

ReaderArena::ReaderArena(MessageReader* message)
    : ReaderArena(message, message->getSegment(0)) {}

ReaderArena::~ReaderArena() noexcept(false) {}

SegmentReader* ReaderArena::tryGetSegment(SegmentId id) {
  if (id == SegmentId(0)) {
    if (segment0.getArray() == nullptr) {
      return nullptr;
    } else {
      return &segment0;
    }
  }

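  // Segments other than segment 0 are looked up lazily and cached in a map. The map lives behind
  // a mutex because multiple threads may traverse the same message concurrently.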
  auto lock = moreSegments.lockExclusive();

  SegmentMap* segments = nullptr;
  KJ_IF_MAYBE(s, *lock) {
    KJ_IF_MAYBE(segment, s->find(id.value)) {
      return *segment;
    }
    segments = s;
  }

  kj::ArrayPtr<const word> newSegment = message->getSegment(id.value);
  if (newSegment == nullptr) {
    return nullptr;
  }

  SegmentWordCount newSegmentSize = verifySegmentSize(newSegment.size());

  if (*lock == nullptr) {
    // OK, the segment exists, so allocate the map.
    segments = &lock->emplace();
  }

  auto segment = kj::heap<SegmentReader>(
      this, id, newSegment.begin(), newSegmentSize, &readLimiter);
  SegmentReader* result = segment;
  segments->insert(id.value, kj::mv(segment));
  return result;
}

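// Called when the message's read limit has been exhausted. The limit comes from
// ReaderOptions::traversalLimitInWords; a caller that legitimately needs to traverse more data
// can raise it when constructing the reader, e.g. (a hedged sketch, not part of this file;
// `wordArray` stands in for a kj::ArrayPtr<const word> holding the flat message):
//
//   capnp::ReaderOptions options;
//   options.traversalLimitInWords = 64 * 1024 * 1024;  // 8x the 8 Mi-word default
//   capnp::FlatArrayMessageReader reader(wordArray, options);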
void ReaderArena::reportReadLimitReached() {
  KJ_FAIL_REQUIRE("Exceeded message traversal limit. See capnp::ReaderOptions.") {
    return;
  }
}

// =======================================================================================

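// The single-argument constructor leaves segment0 as a dummy with a null arena pointer;
// allocate() below detects that case and constructs the real first segment in place on the
// first allocation.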
BuilderArena::BuilderArena(MessageBuilder* message)
    : message(message), segment0(nullptr, SegmentId(0), nullptr, nullptr) {}

BuilderArena::BuilderArena(MessageBuilder* message,
                           kj::ArrayPtr<MessageBuilder::SegmentInit> segments)
    : message(message),
      segment0(this, SegmentId(0), segments[0].space.begin(),
               verifySegmentSize(segments[0].space.size()),
               &this->dummyLimiter, verifySegmentSize(segments[0].wordsUsed)) {
  if (segments.size() > 1) {
    kj::Vector<kj::Own<SegmentBuilder>> builders(segments.size() - 1);

    uint i = 1;
    for (auto& segment: segments.slice(1, segments.size())) {
      builders.add(kj::heap<SegmentBuilder>(
          this, SegmentId(i++), segment.space.begin(), verifySegmentSize(segment.space.size()),
          &this->dummyLimiter, verifySegmentSize(segment.wordsUsed)));
    }

    kj::Vector<kj::ArrayPtr<const word>> forOutput;
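    // Pre-size the output table so that getSegmentsForOutput() never has to reallocate it.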
    forOutput.resize(segments.size());

    segmentWithSpace = builders.back();

    this->moreSegments = kj::heap<MultiSegmentState>(
        MultiSegmentState { kj::mv(builders), kj::mv(forOutput) });

  } else {
    segmentWithSpace = &segment0;
  }
}

BuilderArena::~BuilderArena() noexcept(false) {}

SegmentBuilder* BuilderArena::getSegment(SegmentId id) {
  // This method is allowed to fail if the segment ID is not valid.
  if (id == SegmentId(0)) {
    return &segment0;
  } else {
    KJ_IF_MAYBE(s, moreSegments) {
      KJ_REQUIRE(id.value - 1 < s->get()->builders.size(), "invalid segment id", id.value);
      return const_cast<SegmentBuilder*>(s->get()->builders[id.value - 1].get());
    } else {
      KJ_FAIL_REQUIRE("invalid segment id", id.value);
    }
  }
}

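// Allocates `amount` words of space, adding a new segment if the segment we most recently
// allocated from has no room (see the TODO below about checking other segments too). The
// returned pointer always lies within the returned segment.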
BuilderArena::AllocateResult BuilderArena::allocate(SegmentWordCount amount) {
  if (segment0.getArena() == nullptr) {
    // We're allocating the first segment.
    kj::ArrayPtr<word> ptr = message->allocateSegment(unbound(amount / WORDS));
    auto actualSize = verifySegmentSize(ptr.size());

    // Re-allocate segment0 in-place. This is a bit of a hack, but we have not returned any
    // pointers to this segment yet, so it should be fine.
    kj::dtor(segment0);
    kj::ctor(segment0, this, SegmentId(0), ptr.begin(), actualSize, &this->dummyLimiter);

    segmentWithSpace = &segment0;
    return AllocateResult { &segment0, segment0.allocate(amount) };
  } else {
    if (segmentWithSpace != nullptr) {
      // Check if there is space in an existing segment.
      // TODO(perf): Check for available space in more than just the last segment. We don't
      //   want this to be O(n), though, so we'll need to maintain some sort of table. Complicating
      //   matters, we want SegmentBuilder::allocate() to be fast, so we can't update any such
      //   table when allocation actually happens. Instead, we could have a priority queue based
      //   on the last-known available size, and then re-check the size when we pop segments off it
      //   and shove them to the back of the queue if they have become too small.
      word* attempt = segmentWithSpace->allocate(amount);
      if (attempt != nullptr) {
        return AllocateResult { segmentWithSpace, attempt };
      }
    }

    // Need to allocate a new segment.
    SegmentBuilder* result = addSegmentInternal(message->allocateSegment(unbound(amount / WORDS)));

    // Check this new segment first the next time we need to allocate.
    segmentWithSpace = result;

    // Allocating from the new segment is guaranteed to succeed since we made it big enough.
    return AllocateResult { result, result->allocate(amount) };
  }
}

SegmentBuilder* BuilderArena::addExternalSegment(kj::ArrayPtr<const word> content) {
  return addSegmentInternal(content);
}

template <typename T>
SegmentBuilder* BuilderArena::addSegmentInternal(kj::ArrayPtr<T> content) {
  // This check should never fail in practice, since you can't get an Orphanage without allocating
  // the root segment.
  KJ_REQUIRE(segment0.getArena() != nullptr,
      "Can't allocate external segments before allocating the root segment.");

  auto contentSize = verifySegmentSize(content.size());

  MultiSegmentState* segmentState;
  KJ_IF_MAYBE(s, moreSegments) {
    segmentState = *s;
  } else {
    auto newSegmentState = kj::heap<MultiSegmentState>();
    segmentState = newSegmentState;
    moreSegments = kj::mv(newSegmentState);
  }

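  // Segment IDs for the extra builders are offset by one: builders[i] always holds
  // SegmentId(i + 1), since segment 0 is stored separately as segment0.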
  kj::Own<SegmentBuilder> newBuilder = kj::heap<SegmentBuilder>(
      this, SegmentId(segmentState->builders.size() + 1),
      content.begin(), contentSize, &this->dummyLimiter);
  SegmentBuilder* result = newBuilder.get();
  segmentState->builders.add(kj::mv(newBuilder));

  // Keep forOutput the right size so that we don't have to re-allocate during
  // getSegmentsForOutput(), which callers might reasonably expect is a thread-safe method.
  segmentState->forOutput.resize(segmentState->builders.size() + 1);

  return result;
}

kj::ArrayPtr<const kj::ArrayPtr<const word>> BuilderArena::getSegmentsForOutput() {
  // Although this is a read-only method, we shouldn't need to lock a mutex here because if this
  // is called multiple times simultaneously, we should only be overwriting the array with the
  // exact same data. If the number or size of segments is actually changing due to an activity
  // in another thread, then the caller has a problem regardless of locking here.

  KJ_IF_MAYBE(segmentState, moreSegments) {
    KJ_DASSERT(segmentState->get()->forOutput.size() == segmentState->get()->builders.size() + 1,
        "segmentState->forOutput wasn't resized correctly when the last builder was added.",
        segmentState->get()->forOutput.size(), segmentState->get()->builders.size());

    kj::ArrayPtr<kj::ArrayPtr<const word>> result(
        &segmentState->get()->forOutput[0], segmentState->get()->forOutput.size());
    uint i = 0;
    result[i++] = segment0.currentlyAllocated();
    for (auto& builder: segmentState->get()->builders) {
      result[i++] = builder->currentlyAllocated();
    }
    return result;
  } else {
    if (segment0.getArena() == nullptr) {
      // We haven't actually allocated any segments yet.
      return nullptr;
    } else {
      // We have only one segment so far.
      segment0ForOutput = segment0.currentlyAllocated();
      return kj::arrayPtr(&segment0ForOutput, 1);
    }
  }
}
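
// The segment list returned above is what the serialization helpers ultimately consume, e.g.
// (a hedged sketch, not part of this file):
//
//   capnp::MallocMessageBuilder builder;
//   // ... populate the message ...
//   kj::Array<capnp::word> flat = capnp::messageToFlatArray(builder);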

SegmentReader* BuilderArena::tryGetSegment(SegmentId id) {
  if (id == SegmentId(0)) {
    if (segment0.getArena() == nullptr) {
      // We haven't allocated any segments yet.
      return nullptr;
    } else {
      return &segment0;
    }
  } else {
    KJ_IF_MAYBE(segmentState, moreSegments) {
      if (id.value <= segmentState->get()->builders.size()) {
        // TODO(cleanup): Return a const SegmentReader and tediously constify all SegmentBuilder
        //   pointers throughout the codebase.
        return const_cast<SegmentReader*>(kj::implicitCast<const SegmentReader*>(
            segmentState->get()->builders[id.value - 1].get()));
      }
    }
    return nullptr;
  }
}

void BuilderArena::reportReadLimitReached() {
  KJ_FAIL_ASSERT("Read limit reached for BuilderArena, but it should have been unlimited.") {
    return;
  }
}

#if !CAPNP_LITE
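// LocalCapTable maps capability indices stored in the message to live ClientHook references;
// extractCap() hands out an extra ref so the caller owns its copy.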
kj::Maybe<kj::Own<ClientHook>> BuilderArena::LocalCapTable::extractCap(uint index) {
  if (index < capTable.size()) {
    return capTable[index].map([](kj::Own<ClientHook>& cap) { return cap->addRef(); });
  } else {
    return nullptr;
  }
}

uint BuilderArena::LocalCapTable::injectCap(kj::Own<ClientHook>&& cap) {
  uint result = capTable.size();
  capTable.add(kj::mv(cap));
  return result;
}

void BuilderArena::LocalCapTable::dropCap(uint index) {
  KJ_ASSERT(index < capTable.size(), "Invalid capability descriptor in message.") {
    return;
  }
  capTable[index] = nullptr;
}
#endif // !CAPNP_LITE

} // namespace _ (private)
} // namespace capnp