1// Protocol Buffers - Google's data interchange format
2// Copyright 2008 Google Inc. All rights reserved.
3// https://developers.google.com/protocol-buffers/
4//
5// Redistribution and use in source and binary forms, with or without
6// modification, are permitted provided that the following conditions are
7// met:
8//
9// * Redistributions of source code must retain the above copyright
10// notice, this list of conditions and the following disclaimer.
11// * Redistributions in binary form must reproduce the above
12// copyright notice, this list of conditions and the following disclaimer
13// in the documentation and/or other materials provided with the
14// distribution.
15// * Neither the name of Google Inc. nor the names of its
16// contributors may be used to endorse or promote products derived from
17// this software without specific prior written permission.
18//
19// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
20// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
21// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
22// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
23// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
24// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
25// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
26// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
27// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
29// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30
31// Author: kenton@google.com (Kenton Varda)
32// Based on original Protocol Buffers design by
33// Sanjay Ghemawat, Jeff Dean, and others.
34//
35// This file contains the CodedInputStream and CodedOutputStream classes,
36// which wrap a ZeroCopyInputStream or ZeroCopyOutputStream, respectively,
37// and allow you to read or write individual pieces of data in various
38// formats. In particular, these implement the varint encoding for
39// integers, a simple variable-length encoding in which smaller numbers
40// take fewer bytes.
41//
42// Typically these classes will only be used internally by the protocol
43// buffer library in order to encode and decode protocol buffers. Clients
44// of the library only need to know about this class if they wish to write
45// custom message parsing or serialization procedures.
46//
47// CodedOutputStream example:
48// // Write some data to "myfile". First we write a 4-byte "magic number"
49// // to identify the file type, then write a length-delimited string. The
50// // string is composed of a varint giving the length followed by the raw
51// // bytes.
52// int fd = open("myfile", O_CREAT | O_WRONLY);
53// ZeroCopyOutputStream* raw_output = new FileOutputStream(fd);
54// CodedOutputStream* coded_output = new CodedOutputStream(raw_output);
55//
56// int magic_number = 1234;
57// char text[] = "Hello world!";
58// coded_output->WriteLittleEndian32(magic_number);
59// coded_output->WriteVarint32(strlen(text));
60// coded_output->WriteRaw(text, strlen(text));
61//
62// delete coded_output;
63// delete raw_output;
64// close(fd);
65//
66// CodedInputStream example:
67// // Read a file created by the above code.
68// int fd = open("myfile", O_RDONLY);
69// ZeroCopyInputStream* raw_input = new FileInputStream(fd);
70// CodedInputStream* coded_input = new CodedInputStream(raw_input);
71//
//   uint32_t magic_number;
//   coded_input->ReadLittleEndian32(&magic_number);
73// if (magic_number != 1234) {
74// cerr << "File not in expected format." << endl;
75// return;
76// }
77//
78// uint32_t size;
79// coded_input->ReadVarint32(&size);
80//
81// char* text = new char[size + 1];
//   coded_input->ReadRaw(text, size);
83// text[size] = '\0';
84//
85// delete coded_input;
86// delete raw_input;
87// close(fd);
88//
89// cout << "Text is: " << text << endl;
90// delete [] text;
91//
92// For those who are interested, varint encoding is defined as follows:
93//
94// The encoding operates on unsigned integers of up to 64 bits in length.
95// Each byte of the encoded value has the format:
96// * bits 0-6: Seven bits of the number being encoded.
97// * bit 7: Zero if this is the last byte in the encoding (in which
98// case all remaining bits of the number are zero) or 1 if
99// more bytes follow.
100// The first byte contains the least-significant 7 bits of the number, the
101// second byte (if present) contains the next-least-significant 7 bits,
102// and so on. So, the binary number 1011000101011 would be encoded in two
103// bytes as "10101011 00101100".
104//
105// In theory, varint could be used to encode integers of any length.
106// However, for practicality we set a limit at 64 bits. The maximum encoded
107// length of a number is thus 10 bytes.
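//
// For example, the number 300 (binary 100101100) is encoded as the two bytes
// 0xAC 0x02: the first byte carries the low seven bits (0101100) with the
// continuation bit set, and the second byte carries the remaining bits
// (0000010) with the continuation bit clear. A minimal encoder sketch, for
// illustration only (the real implementations live in the classes below):
//
//   // Writes "value" as a varint into "out" and returns the number of bytes
//   // written (at most 10 for a 64-bit value).
//   int EncodeVarint64(uint64_t value, uint8_t* out) {
//     int count = 0;
//     do {
//       uint8_t byte = value & 0x7F;
//       value >>= 7;
//       if (value != 0) byte |= 0x80;  // More bytes follow.
//       out[count++] = byte;
//     } while (value != 0);
//     return count;
//   }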
108
109#ifndef GOOGLE_PROTOBUF_IO_CODED_STREAM_H__
110#define GOOGLE_PROTOBUF_IO_CODED_STREAM_H__
111
112
113#include <assert.h>
114
115#include <atomic>
116#include <climits>
117#include <cstddef>
118#include <cstring>
119#include <limits>
120#include <string>
121#include <type_traits>
122#include <utility>
123
124#if defined(_MSC_VER) && _MSC_VER >= 1300 && !defined(__INTEL_COMPILER)
125// If MSVC has "/RTCc" set, it will complain about truncating casts at
126// runtime. This file contains some intentional truncating casts.
127#pragma runtime_checks("c", off)
128#endif
129
130
131#include <google/protobuf/stubs/common.h>
132#include <google/protobuf/stubs/logging.h>
133#include <google/protobuf/stubs/strutil.h>
134#include <google/protobuf/port.h>
135#include <google/protobuf/stubs/port.h>
136
137
138// Must be included last.
139#include <google/protobuf/port_def.inc>
140
141namespace google {
142namespace protobuf {
143
144class DescriptorPool;
145class MessageFactory;
146class ZeroCopyCodedInputStream;
147
148namespace internal {
149void MapTestForceDeterministic();
150class EpsCopyByteStream;
151} // namespace internal
152
153namespace io {
154
155// Defined in this file.
156class CodedInputStream;
157class CodedOutputStream;
158
159// Defined in other files.
160class ZeroCopyInputStream; // zero_copy_stream.h
161class ZeroCopyOutputStream; // zero_copy_stream.h
162
163// Class which reads and decodes binary data which is composed of varint-
164// encoded integers and fixed-width pieces. Wraps a ZeroCopyInputStream.
165// Most users will not need to deal with CodedInputStream.
166//
167// Most methods of CodedInputStream that return a bool return false if an
168// underlying I/O error occurs or if the data is malformed. Once such a
169// failure occurs, the CodedInputStream is broken and is no longer useful.
170// After a failure, callers also should assume writes to "out" args may have
171// occurred, though nothing useful can be determined from those writes.
172class PROTOBUF_EXPORT CodedInputStream {
173 public:
174 // Create a CodedInputStream that reads from the given ZeroCopyInputStream.
175 explicit CodedInputStream(ZeroCopyInputStream* input);
176
177 // Create a CodedInputStream that reads from the given flat array. This is
178 // faster than using an ArrayInputStream. PushLimit(size) is implied by
179 // this constructor.
180 explicit CodedInputStream(const uint8_t* buffer, int size);
181
182 // Destroy the CodedInputStream and position the underlying
183 // ZeroCopyInputStream at the first unread byte. If an error occurred while
184 // reading (causing a method to return false), then the exact position of
185 // the input stream may be anywhere between the last value that was read
186 // successfully and the stream's byte limit.
187 ~CodedInputStream();
188
189 // Return true if this CodedInputStream reads from a flat array instead of
190 // a ZeroCopyInputStream.
191 inline bool IsFlat() const;
192
193 // Skips a number of bytes. Returns false if an underlying read error
194 // occurs.
195 inline bool Skip(int count);
196
197 // Sets *data to point directly at the unread part of the CodedInputStream's
198 // underlying buffer, and *size to the size of that buffer, but does not
199 // advance the stream's current position. This will always either produce
200 // a non-empty buffer or return false. If the caller consumes any of
201 // this data, it should then call Skip() to skip over the consumed bytes.
202 // This may be useful for implementing external fast parsing routines for
203 // types of data not covered by the CodedInputStream interface.
204 bool GetDirectBufferPointer(const void** data, int* size);
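  //
  // For illustration, a minimal sketch of the consume-then-Skip() pattern
  // described above (assuming "coded_input" is a CodedInputStream* and
  // ConsumePrefix() is a hypothetical routine returning how many bytes it
  // used):
  //
  //   const void* data;
  //   int size;
  //   if (coded_input->GetDirectBufferPointer(&data, &size)) {
  //     int used = ConsumePrefix(data, size);
  //     coded_input->Skip(used);
  //   }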
205
206 // Like GetDirectBufferPointer, but this method is inlined, and does not
207 // attempt to Refresh() if the buffer is currently empty.
208 PROTOBUF_ALWAYS_INLINE
209 void GetDirectBufferPointerInline(const void** data, int* size);
210
211 // Read raw bytes, copying them into the given buffer.
212 bool ReadRaw(void* buffer, int size);
213
214 // Like ReadRaw, but reads into a string.
215 bool ReadString(std::string* buffer, int size);
216
217
218 // Read a 32-bit little-endian integer.
219 bool ReadLittleEndian32(uint32_t* value);
220 // Read a 64-bit little-endian integer.
221 bool ReadLittleEndian64(uint64_t* value);
222
223 // These methods read from an externally provided buffer. The caller is
224 // responsible for ensuring that the buffer has sufficient space.
225 // Read a 32-bit little-endian integer.
226 static const uint8_t* ReadLittleEndian32FromArray(const uint8_t* buffer,
227 uint32_t* value);
228 // Read a 64-bit little-endian integer.
229 static const uint8_t* ReadLittleEndian64FromArray(const uint8_t* buffer,
230 uint64_t* value);
231
232 // Read an unsigned integer with Varint encoding, truncating to 32 bits.
233 // Reading a 32-bit value is equivalent to reading a 64-bit one and casting
234 // it to uint32_t, but may be more efficient.
235 bool ReadVarint32(uint32_t* value);
236 // Read an unsigned integer with Varint encoding.
237 bool ReadVarint64(uint64_t* value);
238
239 // Reads a varint off the wire into an "int". This should be used for reading
240 // sizes off the wire (sizes of strings, submessages, bytes fields, etc).
241 //
242 // The value from the wire is interpreted as unsigned. If its value exceeds
243 // the representable value of an integer on this platform, instead of
244 // truncating we return false. Truncating (as performed by ReadVarint32()
245 // above) is an acceptable approach for fields representing an integer, but
246 // when we are parsing a size from the wire, truncating the value would result
247 // in us misparsing the payload.
248 bool ReadVarintSizeAsInt(int* value);
249
250 // Read a tag. This calls ReadVarint32() and returns the result, or returns
251 // zero (which is not a valid tag) if ReadVarint32() fails. Also, ReadTag
252 // (but not ReadTagNoLastTag) updates the last tag value, which can be checked
253 // with LastTagWas().
254 //
255 // Always inline because this is only called in one place per parse loop
256 // but it is called for every iteration of said loop, so it should be fast.
257 // GCC doesn't want to inline this by default.
258 PROTOBUF_ALWAYS_INLINE uint32_t ReadTag() {
259 return last_tag_ = ReadTagNoLastTag();
260 }
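  //
  // For illustration, a simplified parse loop using ReadTag() (assuming
  // "input" is a CodedInputStream*):
  //
  //   uint32_t tag;
  //   while ((tag = input->ReadTag()) != 0) {
  //     switch (tag >> 3) {            // field number
  //       case 1: {                    // e.g. a varint-encoded field
  //         uint64_t value;
  //         if (!input->ReadVarint64(&value)) return false;
  //         break;
  //       }
  //       default:
  //         // Unknown field: a real parser would skip it based on the wire
  //         // type in the low three bits of the tag.
  //         break;
  //     }
  //   }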
261
262 PROTOBUF_ALWAYS_INLINE uint32_t ReadTagNoLastTag();
263
  // This is usually a faster alternative to ReadTag() when cutoff is a manifest
265 // constant. It does particularly well for cutoff >= 127. The first part
266 // of the return value is the tag that was read, though it can also be 0 in
267 // the cases where ReadTag() would return 0. If the second part is true
268 // then the tag is known to be in [0, cutoff]. If not, the tag either is
269 // above cutoff or is 0. (There's intentional wiggle room when tag is 0,
270 // because that can arise in several ways, and for best performance we want
271 // to avoid an extra "is tag == 0?" check here.)
272 PROTOBUF_ALWAYS_INLINE
273 std::pair<uint32_t, bool> ReadTagWithCutoff(uint32_t cutoff) {
274 std::pair<uint32_t, bool> result = ReadTagWithCutoffNoLastTag(cutoff);
275 last_tag_ = result.first;
276 return result;
277 }
278
279 PROTOBUF_ALWAYS_INLINE
280 std::pair<uint32_t, bool> ReadTagWithCutoffNoLastTag(uint32_t cutoff);
281
282 // Usually returns true if calling ReadVarint32() now would produce the given
283 // value. Will always return false if ReadVarint32() would not return the
284 // given value. If ExpectTag() returns true, it also advances past
285 // the varint. For best performance, use a compile-time constant as the
286 // parameter.
287 // Always inline because this collapses to a small number of instructions
288 // when given a constant parameter, but GCC doesn't want to inline by default.
289 PROTOBUF_ALWAYS_INLINE bool ExpectTag(uint32_t expected);
290
291 // Like above, except this reads from the specified buffer. The caller is
292 // responsible for ensuring that the buffer is large enough to read a varint
293 // of the expected size. For best performance, use a compile-time constant as
294 // the expected tag parameter.
295 //
296 // Returns a pointer beyond the expected tag if it was found, or NULL if it
297 // was not.
298 PROTOBUF_ALWAYS_INLINE
299 static const uint8_t* ExpectTagFromArray(const uint8_t* buffer,
300 uint32_t expected);
301
302 // Usually returns true if no more bytes can be read. Always returns false
303 // if more bytes can be read. If ExpectAtEnd() returns true, a subsequent
304 // call to LastTagWas() will act as if ReadTag() had been called and returned
305 // zero, and ConsumedEntireMessage() will return true.
306 bool ExpectAtEnd();
307
308 // If the last call to ReadTag() or ReadTagWithCutoff() returned the given
309 // value, returns true. Otherwise, returns false.
310 // ReadTagNoLastTag/ReadTagWithCutoffNoLastTag do not preserve the last
311 // returned value.
312 //
313 // This is needed because parsers for some types of embedded messages
314 // (with field type TYPE_GROUP) don't actually know that they've reached the
315 // end of a message until they see an ENDGROUP tag, which was actually part
316 // of the enclosing message. The enclosing message would like to check that
317 // tag to make sure it had the right number, so it calls LastTagWas() on
318 // return from the embedded parser to check.
319 bool LastTagWas(uint32_t expected);
320 void SetLastTag(uint32_t tag) { last_tag_ = tag; }
321
  // When parsing a message (but NOT a group), this method must be called
323 // immediately after MergeFromCodedStream() returns (if it returns true)
324 // to further verify that the message ended in a legitimate way. For
325 // example, this verifies that parsing did not end on an end-group tag.
326 // It also checks for some cases where, due to optimizations,
327 // MergeFromCodedStream() can incorrectly return true.
328 bool ConsumedEntireMessage();
329 void SetConsumed() { legitimate_message_end_ = true; }
330
331 // Limits ----------------------------------------------------------
332 // Limits are used when parsing length-delimited embedded messages.
333 // After the message's length is read, PushLimit() is used to prevent
334 // the CodedInputStream from reading beyond that length. Once the
335 // embedded message has been parsed, PopLimit() is called to undo the
336 // limit.
337
338 // Opaque type used with PushLimit() and PopLimit(). Do not modify
339 // values of this type yourself. The only reason that this isn't a
340 // struct with private internals is for efficiency.
341 typedef int Limit;
342
343 // Places a limit on the number of bytes that the stream may read,
344 // starting from the current position. Once the stream hits this limit,
345 // it will act like the end of the input has been reached until PopLimit()
346 // is called.
347 //
348 // As the names imply, the stream conceptually has a stack of limits. The
349 // shortest limit on the stack is always enforced, even if it is not the
350 // top limit.
351 //
352 // The value returned by PushLimit() is opaque to the caller, and must
353 // be passed unchanged to the corresponding call to PopLimit().
354 Limit PushLimit(int byte_limit);
355
356 // Pops the last limit pushed by PushLimit(). The input must be the value
357 // returned by that call to PushLimit().
358 void PopLimit(Limit limit);
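  //
  // For illustration, the typical pattern for a length-delimited embedded
  // message (assuming "input" is a CodedInputStream* and ParseSubMessage() is
  // a placeholder for the embedded message's parser):
  //
  //   int length;
  //   if (!input->ReadVarintSizeAsInt(&length)) return false;
  //   CodedInputStream::Limit limit = input->PushLimit(length);
  //   if (!ParseSubMessage(input)) return false;
  //   bool ok = input->ConsumedEntireMessage();
  //   input->PopLimit(limit);
  //   if (!ok) return false;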
359
360 // Returns the number of bytes left until the nearest limit on the
361 // stack is hit, or -1 if no limits are in place.
362 int BytesUntilLimit() const;
363
364 // Returns current position relative to the beginning of the input stream.
365 int CurrentPosition() const;
366
367 // Total Bytes Limit -----------------------------------------------
368 // To prevent malicious users from sending excessively large messages
369 // and causing memory exhaustion, CodedInputStream imposes a hard limit on
370 // the total number of bytes it will read.
371
372 // Sets the maximum number of bytes that this CodedInputStream will read
373 // before refusing to continue. To prevent servers from allocating enormous
374 // amounts of memory to hold parsed messages, the maximum message length
375 // should be limited to the shortest length that will not harm usability.
376 // The default limit is INT_MAX (~2GB) and apps should set shorter limits
377 // if possible. An error will always be printed to stderr if the limit is
378 // reached.
379 //
380 // Note: setting a limit less than the current read position is interpreted
381 // as a limit on the current position.
382 //
383 // This is unrelated to PushLimit()/PopLimit().
384 void SetTotalBytesLimit(int total_bytes_limit);
385
386 // The Total Bytes Limit minus the Current Position, or -1 if the total bytes
387 // limit is INT_MAX.
388 int BytesUntilTotalBytesLimit() const;
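  //
  // For illustration, a minimal sketch (assuming "raw_input" is a
  // ZeroCopyInputStream* and a 64 MiB cap suits the application):
  //
  //   CodedInputStream coded_input(raw_input);
  //   coded_input.SetTotalBytesLimit(64 << 20);  // refuse to read more
  //   // ... parse from coded_input as usual ...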
389
390 // Recursion Limit -------------------------------------------------
391 // To prevent corrupt or malicious messages from causing stack overflows,
392 // we must keep track of the depth of recursion when parsing embedded
393 // messages and groups. CodedInputStream keeps track of this because it
394 // is the only object that is passed down the stack during parsing.
395
396 // Sets the maximum recursion depth. The default is 100.
397 void SetRecursionLimit(int limit);
398 int RecursionBudget() { return recursion_budget_; }
399
400 static int GetDefaultRecursionLimit() { return default_recursion_limit_; }
401
402 // Increments the current recursion depth. Returns true if the depth is
403 // under the limit, false if it has gone over.
404 bool IncrementRecursionDepth();
405
406 // Decrements the recursion depth if possible.
407 void DecrementRecursionDepth();
408
409 // Decrements the recursion depth blindly. This is faster than
410 // DecrementRecursionDepth(). It should be used only if all previous
411 // increments to recursion depth were successful.
412 void UnsafeDecrementRecursionDepth();
413
414 // Shorthand for make_pair(PushLimit(byte_limit), --recursion_budget_).
415 // Using this can reduce code size and complexity in some cases. The caller
416 // is expected to check that the second part of the result is non-negative (to
417 // bail out if the depth of recursion is too high) and, if all is well, to
418 // later pass the first part of the result to PopLimit() or similar.
419 std::pair<CodedInputStream::Limit, int> IncrementRecursionDepthAndPushLimit(
420 int byte_limit);
421
422 // Shorthand for PushLimit(ReadVarint32(&length) ? length : 0).
423 Limit ReadLengthAndPushLimit();
424
425 // Helper that is equivalent to: {
426 // bool result = ConsumedEntireMessage();
427 // PopLimit(limit);
428 // UnsafeDecrementRecursionDepth();
429 // return result; }
430 // Using this can reduce code size and complexity in some cases.
431 // Do not use unless the current recursion depth is greater than zero.
432 bool DecrementRecursionDepthAndPopLimit(Limit limit);
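  //
  // For illustration, a sketch of how a submessage parser might use these
  // helpers (assuming "input" is a CodedInputStream* and ParseSubMessage() is
  // a placeholder):
  //
  //   int length;
  //   if (!input->ReadVarintSizeAsInt(&length)) return false;
  //   std::pair<CodedInputStream::Limit, int> p =
  //       input->IncrementRecursionDepthAndPushLimit(length);
  //   if (p.second < 0 || !ParseSubMessage(input)) return false;
  //   if (!input->DecrementRecursionDepthAndPopLimit(p.first)) return false;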
433
434 // Helper that is equivalent to: {
435 // bool result = ConsumedEntireMessage();
436 // PopLimit(limit);
437 // return result; }
438 // Using this can reduce code size and complexity in some cases.
439 bool CheckEntireMessageConsumedAndPopLimit(Limit limit);
440
441 // Extension Registry ----------------------------------------------
442 // ADVANCED USAGE: 99.9% of people can ignore this section.
443 //
444 // By default, when parsing extensions, the parser looks for extension
445 // definitions in the pool which owns the outer message's Descriptor.
446 // However, you may call SetExtensionRegistry() to provide an alternative
447 // pool instead. This makes it possible, for example, to parse a message
448 // using a generated class, but represent some extensions using
449 // DynamicMessage.
450
451 // Set the pool used to look up extensions. Most users do not need to call
452 // this as the correct pool will be chosen automatically.
453 //
454 // WARNING: It is very easy to misuse this. Carefully read the requirements
455 // below. Do not use this unless you are sure you need it. Almost no one
456 // does.
457 //
458 // Let's say you are parsing a message into message object m, and you want
459 // to take advantage of SetExtensionRegistry(). You must follow these
460 // requirements:
461 //
462 // The given DescriptorPool must contain m->GetDescriptor(). It is not
463 // sufficient for it to simply contain a descriptor that has the same name
464 // and content -- it must be the *exact object*. In other words:
465 // assert(pool->FindMessageTypeByName(m->GetDescriptor()->full_name()) ==
466 // m->GetDescriptor());
467 // There are two ways to satisfy this requirement:
468 // 1) Use m->GetDescriptor()->pool() as the pool. This is generally useless
469 // because this is the pool that would be used anyway if you didn't call
470 // SetExtensionRegistry() at all.
471 // 2) Use a DescriptorPool which has m->GetDescriptor()->pool() as an
472 // "underlay". Read the documentation for DescriptorPool for more
473 // information about underlays.
474 //
475 // You must also provide a MessageFactory. This factory will be used to
476 // construct Message objects representing extensions. The factory's
477 // GetPrototype() MUST return non-NULL for any Descriptor which can be found
478 // through the provided pool.
479 //
480 // If the provided factory might return instances of protocol-compiler-
481 // generated (i.e. compiled-in) types, or if the outer message object m is
482 // a generated type, then the given factory MUST have this property: If
483 // GetPrototype() is given a Descriptor which resides in
484 // DescriptorPool::generated_pool(), the factory MUST return the same
485 // prototype which MessageFactory::generated_factory() would return. That
486 // is, given a descriptor for a generated type, the factory must return an
487 // instance of the generated class (NOT DynamicMessage). However, when
488 // given a descriptor for a type that is NOT in generated_pool, the factory
489 // is free to return any implementation.
490 //
491 // The reason for this requirement is that generated sub-objects may be
492 // accessed via the standard (non-reflection) extension accessor methods,
493 // and these methods will down-cast the object to the generated class type.
494 // If the object is not actually of that type, the results would be undefined.
495 // On the other hand, if an extension is not compiled in, then there is no
496 // way the code could end up accessing it via the standard accessors -- the
497 // only way to access the extension is via reflection. When using reflection,
498 // DynamicMessage and generated messages are indistinguishable, so it's fine
499 // if these objects are represented using DynamicMessage.
500 //
501 // Using DynamicMessageFactory on which you have called
502 // SetDelegateToGeneratedFactory(true) should be sufficient to satisfy the
503 // above requirement.
504 //
505 // If either pool or factory is NULL, both must be NULL.
506 //
507 // Note that this feature is ignored when parsing "lite" messages as they do
508 // not have descriptors.
509 void SetExtensionRegistry(const DescriptorPool* pool,
510 MessageFactory* factory);
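  //
  // For illustration, a sketch of the DynamicMessage-based setup described
  // above (assuming "coded_input" is a CodedInputStream* and "m" is the
  // message being parsed; the pool and factory must outlive the parse):
  //
  //   DescriptorPool pool(m->GetDescriptor()->pool());  // underlay, see above
  //   DynamicMessageFactory factory;
  //   factory.SetDelegateToGeneratedFactory(true);
  //   coded_input->SetExtensionRegistry(&pool, &factory);
  //   // ... parse into *m using coded_input ...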
511
512 // Get the DescriptorPool set via SetExtensionRegistry(), or NULL if no pool
513 // has been provided.
514 const DescriptorPool* GetExtensionPool();
515
516 // Get the MessageFactory set via SetExtensionRegistry(), or NULL if no
517 // factory has been provided.
518 MessageFactory* GetExtensionFactory();
519
520 private:
521 GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(CodedInputStream);
522
523 const uint8_t* buffer_;
524 const uint8_t* buffer_end_; // pointer to the end of the buffer.
525 ZeroCopyInputStream* input_;
526 int total_bytes_read_; // total bytes read from input_, including
527 // the current buffer
528
529 // If total_bytes_read_ surpasses INT_MAX, we record the extra bytes here
530 // so that we can BackUp() on destruction.
531 int overflow_bytes_;
532
533 // LastTagWas() stuff.
534 uint32_t last_tag_; // result of last ReadTag() or ReadTagWithCutoff().
535
536 // This is set true by ReadTag{Fallback/Slow}() if it is called when exactly
537 // at EOF, or by ExpectAtEnd() when it returns true. This happens when we
538 // reach the end of a message and attempt to read another tag.
539 bool legitimate_message_end_;
540
541 // See EnableAliasing().
542 bool aliasing_enabled_;
543
544 // Limits
545 Limit current_limit_; // if position = -1, no limit is applied
546
547 // For simplicity, if the current buffer crosses a limit (either a normal
548 // limit created by PushLimit() or the total bytes limit), buffer_size_
549 // only tracks the number of bytes before that limit. This field
550 // contains the number of bytes after it. Note that this implies that if
551 // buffer_size_ == 0 and buffer_size_after_limit_ > 0, we know we've
552 // hit a limit. However, if both are zero, it doesn't necessarily mean
553 // we aren't at a limit -- the buffer may have ended exactly at the limit.
554 int buffer_size_after_limit_;
555
556 // Maximum number of bytes to read, period. This is unrelated to
557 // current_limit_. Set using SetTotalBytesLimit().
558 int total_bytes_limit_;
559
560 // Current recursion budget, controlled by IncrementRecursionDepth() and
561 // similar. Starts at recursion_limit_ and goes down: if this reaches
562 // -1 we are over budget.
563 int recursion_budget_;
564 // Recursion depth limit, set by SetRecursionLimit().
565 int recursion_limit_;
566
567 // See SetExtensionRegistry().
568 const DescriptorPool* extension_pool_;
569 MessageFactory* extension_factory_;
570
571 // Private member functions.
572
573 // Fallback when Skip() goes past the end of the current buffer.
574 bool SkipFallback(int count, int original_buffer_size);
575
576 // Advance the buffer by a given number of bytes.
577 void Advance(int amount);
578
579 // Back up input_ to the current buffer position.
580 void BackUpInputToCurrentPosition();
581
582 // Recomputes the value of buffer_size_after_limit_. Must be called after
583 // current_limit_ or total_bytes_limit_ changes.
584 void RecomputeBufferLimits();
585
586 // Writes an error message saying that we hit total_bytes_limit_.
587 void PrintTotalBytesLimitError();
588
589 // Called when the buffer runs out to request more data. Implies an
590 // Advance(BufferSize()).
591 bool Refresh();
592
593 // When parsing varints, we optimize for the common case of small values, and
594 // then optimize for the case when the varint fits within the current buffer
595 // piece. The Fallback method is used when we can't use the one-byte
596 // optimization. The Slow method is yet another fallback when the buffer is
597 // not large enough. Making the slow path out-of-line speeds up the common
598 // case by 10-15%. The slow path is fairly uncommon: it only triggers when a
599 // message crosses multiple buffers. Note: ReadVarint32Fallback() and
600 // ReadVarint64Fallback() are called frequently and generally not inlined, so
601 // they have been optimized to avoid "out" parameters. The former returns -1
602 // if it fails and the uint32_t it read otherwise. The latter has a bool
603 // indicating success or failure as part of its return type.
604 int64_t ReadVarint32Fallback(uint32_t first_byte_or_zero);
605 int ReadVarintSizeAsIntFallback();
606 std::pair<uint64_t, bool> ReadVarint64Fallback();
607 bool ReadVarint32Slow(uint32_t* value);
608 bool ReadVarint64Slow(uint64_t* value);
609 int ReadVarintSizeAsIntSlow();
610 bool ReadLittleEndian32Fallback(uint32_t* value);
611 bool ReadLittleEndian64Fallback(uint64_t* value);
612
613 // Fallback/slow methods for reading tags. These do not update last_tag_,
614 // but will set legitimate_message_end_ if we are at the end of the input
615 // stream.
616 uint32_t ReadTagFallback(uint32_t first_byte_or_zero);
617 uint32_t ReadTagSlow();
618 bool ReadStringFallback(std::string* buffer, int size);
619
620 // Return the size of the buffer.
621 int BufferSize() const;
622
623 static const int kDefaultTotalBytesLimit = INT_MAX;
624
625 static int default_recursion_limit_; // 100 by default.
626
627 friend class google::protobuf::ZeroCopyCodedInputStream;
628 friend class google::protobuf::internal::EpsCopyByteStream;
629};
630
// EpsCopyOutputStream wraps a ZeroCopyOutputStream and exposes a new stream,
// which has the property that you can write kSlopBytes (16 bytes) from the
// current position without bounds checks. The cursor into the stream is
// managed by the user of the class and is an explicit parameter in the
// methods. Careful use of this class, i.e. keeping ptr a local variable,
// eliminates the need for the compiler to sync the ptr value between register
// and memory.
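//
// For illustration, a sketch of the pointer-based calling convention (assuming
// "stream" is a ZeroCopyOutputStream* and deterministic serialization is not
// required):
//
//   uint8_t* ptr;
//   EpsCopyOutputStream out(stream, /*deterministic=*/false, &ptr);
//   ptr = out.EnsureSpace(ptr);          // now kSlopBytes may be written
//   ptr = out.WriteRaw("abcd", 4, ptr);  // keep ptr in a local variable
//   ptr = out.Trim(ptr);                 // flush back into the stream
//   if (out.HadError()) { /* handle I/O error */ }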
637class PROTOBUF_EXPORT EpsCopyOutputStream {
638 public:
639 enum { kSlopBytes = 16 };
640
641 // Initialize from a stream.
642 EpsCopyOutputStream(ZeroCopyOutputStream* stream, bool deterministic,
643 uint8_t** pp)
644 : end_(buffer_),
645 stream_(stream),
646 is_serialization_deterministic_(deterministic) {
647 *pp = buffer_;
648 }
649
  // Only for array serialization. No overflow protection; end_ will point to
  // the end of the array. When using this, the total size is already known,
  // so there is no need to maintain the slop region.
653 EpsCopyOutputStream(void* data, int size, bool deterministic)
654 : end_(static_cast<uint8_t*>(data) + size),
655 buffer_end_(nullptr),
656 stream_(nullptr),
657 is_serialization_deterministic_(deterministic) {}
658
659 // Initialize from stream but with the first buffer already given (eager).
660 EpsCopyOutputStream(void* data, int size, ZeroCopyOutputStream* stream,
661 bool deterministic, uint8_t** pp)
662 : stream_(stream), is_serialization_deterministic_(deterministic) {
663 *pp = SetInitialBuffer(data, size);
664 }
665
  // Flushes everything that has been written into the underlying
  // ZeroCopyOutputStream and trims the underlying stream to the location of
  // ptr.
668 uint8_t* Trim(uint8_t* ptr);
669
  // After this it is guaranteed that you can safely write kSlopBytes to ptr.
  // This will never fail! The underlying stream can still produce an error;
  // use HadError() to check for errors.
673 PROTOBUF_NODISCARD uint8_t* EnsureSpace(uint8_t* ptr) {
674 if (PROTOBUF_PREDICT_FALSE(ptr >= end_)) {
675 return EnsureSpaceFallback(ptr);
676 }
677 return ptr;
678 }
679
680 uint8_t* WriteRaw(const void* data, int size, uint8_t* ptr) {
681 if (PROTOBUF_PREDICT_FALSE(end_ - ptr < size)) {
682 return WriteRawFallback(data, size, ptr);
683 }
    std::memcpy(ptr, data, size);
685 return ptr + size;
686 }
  // Writes the buffer specified by data, size to the stream, possibly by
  // aliasing the buffer (i.e. not copying the data). The caller is responsible
  // for making sure the buffer stays alive for the duration of the
  // ZeroCopyOutputStream.
691#ifndef NDEBUG
692 PROTOBUF_NOINLINE
693#endif
694 uint8_t* WriteRawMaybeAliased(const void* data, int size, uint8_t* ptr) {
695 if (aliasing_enabled_) {
696 return WriteAliasedRaw(data, size, ptr);
697 } else {
698 return WriteRaw(data, size, ptr);
699 }
700 }
701
702
703#ifndef NDEBUG
704 PROTOBUF_NOINLINE
705#endif
706 uint8_t* WriteStringMaybeAliased(uint32_t num, const std::string& s,
707 uint8_t* ptr) {
708 std::ptrdiff_t size = s.size();
709 if (PROTOBUF_PREDICT_FALSE(
710 size >= 128 || end_ - ptr + 16 - TagSize(num << 3) - 1 < size)) {
711 return WriteStringMaybeAliasedOutline(num, s, ptr);
712 }
    ptr = UnsafeVarint((num << 3) | 2, ptr);
    *ptr++ = static_cast<uint8_t>(size);
    std::memcpy(ptr, s.data(), size);
716 return ptr + size;
717 }
718 uint8_t* WriteBytesMaybeAliased(uint32_t num, const std::string& s,
719 uint8_t* ptr) {
720 return WriteStringMaybeAliased(num, s, ptr);
721 }
722
723 template <typename T>
724 PROTOBUF_ALWAYS_INLINE uint8_t* WriteString(uint32_t num, const T& s,
725 uint8_t* ptr) {
726 std::ptrdiff_t size = s.size();
727 if (PROTOBUF_PREDICT_FALSE(
728 size >= 128 || end_ - ptr + 16 - TagSize(num << 3) - 1 < size)) {
729 return WriteStringOutline(num, s, ptr);
730 }
    ptr = UnsafeVarint((num << 3) | 2, ptr);
    *ptr++ = static_cast<uint8_t>(size);
    std::memcpy(ptr, s.data(), size);
734 return ptr + size;
735 }
736 template <typename T>
737#ifndef NDEBUG
738 PROTOBUF_NOINLINE
739#endif
740 uint8_t* WriteBytes(uint32_t num, const T& s, uint8_t* ptr) {
741 return WriteString(num, s, ptr);
742 }
743
744 template <typename T>
745 PROTOBUF_ALWAYS_INLINE uint8_t* WriteInt32Packed(int num, const T& r,
746 int size, uint8_t* ptr) {
747 return WriteVarintPacked(num, r, size, ptr, Encode64);
748 }
749 template <typename T>
750 PROTOBUF_ALWAYS_INLINE uint8_t* WriteUInt32Packed(int num, const T& r,
751 int size, uint8_t* ptr) {
752 return WriteVarintPacked(num, r, size, ptr, Encode32);
753 }
754 template <typename T>
755 PROTOBUF_ALWAYS_INLINE uint8_t* WriteSInt32Packed(int num, const T& r,
756 int size, uint8_t* ptr) {
757 return WriteVarintPacked(num, r, size, ptr, ZigZagEncode32);
758 }
759 template <typename T>
760 PROTOBUF_ALWAYS_INLINE uint8_t* WriteInt64Packed(int num, const T& r,
761 int size, uint8_t* ptr) {
762 return WriteVarintPacked(num, r, size, ptr, Encode64);
763 }
764 template <typename T>
765 PROTOBUF_ALWAYS_INLINE uint8_t* WriteUInt64Packed(int num, const T& r,
766 int size, uint8_t* ptr) {
767 return WriteVarintPacked(num, r, size, ptr, Encode64);
768 }
769 template <typename T>
770 PROTOBUF_ALWAYS_INLINE uint8_t* WriteSInt64Packed(int num, const T& r,
771 int size, uint8_t* ptr) {
772 return WriteVarintPacked(num, r, size, ptr, ZigZagEncode64);
773 }
774 template <typename T>
775 PROTOBUF_ALWAYS_INLINE uint8_t* WriteEnumPacked(int num, const T& r, int size,
776 uint8_t* ptr) {
777 return WriteVarintPacked(num, r, size, ptr, Encode64);
778 }
779
780 template <typename T>
781 PROTOBUF_ALWAYS_INLINE uint8_t* WriteFixedPacked(int num, const T& r,
782 uint8_t* ptr) {
783 ptr = EnsureSpace(ptr);
784 constexpr auto element_size = sizeof(typename T::value_type);
785 auto size = r.size() * element_size;
786 ptr = WriteLengthDelim(num, size, ptr);
787 return WriteRawLittleEndian<element_size>(r.data(), static_cast<int>(size),
788 ptr);
789 }
790
791 // Returns true if there was an underlying I/O error since this object was
792 // created.
793 bool HadError() const { return had_error_; }
794
795 // Instructs the EpsCopyOutputStream to allow the underlying
796 // ZeroCopyOutputStream to hold pointers to the original structure instead of
797 // copying, if it supports it (i.e. output->AllowsAliasing() is true). If the
  // underlying stream does not support aliasing, then enabling it has no
  // effect. For now, this only affects the behavior of
800 // WriteRawMaybeAliased().
801 //
  // NOTE: It is the caller's responsibility to ensure that the chunk of memory
803 // remains live until all of the data has been consumed from the stream.
804 void EnableAliasing(bool enabled);
805
806 // See documentation on CodedOutputStream::SetSerializationDeterministic.
807 void SetSerializationDeterministic(bool value) {
808 is_serialization_deterministic_ = value;
809 }
810
811 // See documentation on CodedOutputStream::IsSerializationDeterministic.
812 bool IsSerializationDeterministic() const {
813 return is_serialization_deterministic_;
814 }
815
816 // The number of bytes written to the stream at position ptr, relative to the
817 // stream's overall position.
818 int64_t ByteCount(uint8_t* ptr) const;
819
820
821 private:
822 uint8_t* end_;
823 uint8_t* buffer_end_ = buffer_;
824 uint8_t buffer_[2 * kSlopBytes];
825 ZeroCopyOutputStream* stream_;
826 bool had_error_ = false;
827 bool aliasing_enabled_ = false; // See EnableAliasing().
828 bool is_serialization_deterministic_;
829 bool skip_check_consistency = false;
830
831 uint8_t* EnsureSpaceFallback(uint8_t* ptr);
832 inline uint8_t* Next();
833 int Flush(uint8_t* ptr);
834 std::ptrdiff_t GetSize(uint8_t* ptr) const {
835 GOOGLE_DCHECK(ptr <= end_ + kSlopBytes); // NOLINT
836 return end_ + kSlopBytes - ptr;
837 }
838
839 uint8_t* Error() {
840 had_error_ = true;
841 // We use the patch buffer to always guarantee space to write to.
842 end_ = buffer_ + kSlopBytes;
843 return buffer_;
844 }
845
846 static constexpr int TagSize(uint32_t tag) {
847 return (tag < (1 << 7)) ? 1
848 : (tag < (1 << 14)) ? 2
849 : (tag < (1 << 21)) ? 3
850 : (tag < (1 << 28)) ? 4
851 : 5;
852 }
853
854 PROTOBUF_ALWAYS_INLINE uint8_t* WriteTag(uint32_t num, uint32_t wt,
855 uint8_t* ptr) {
856 GOOGLE_DCHECK(ptr < end_); // NOLINT
    return UnsafeVarint((num << 3) | wt, ptr);
858 }
859
860 PROTOBUF_ALWAYS_INLINE uint8_t* WriteLengthDelim(int num, uint32_t size,
861 uint8_t* ptr) {
    ptr = WriteTag(num, 2, ptr);
    return UnsafeWriteSize(size, ptr);
864 }
865
866 uint8_t* WriteRawFallback(const void* data, int size, uint8_t* ptr);
867
868 uint8_t* WriteAliasedRaw(const void* data, int size, uint8_t* ptr);
869
870 uint8_t* WriteStringMaybeAliasedOutline(uint32_t num, const std::string& s,
871 uint8_t* ptr);
872 uint8_t* WriteStringOutline(uint32_t num, const std::string& s, uint8_t* ptr);
873
874 template <typename T, typename E>
875 PROTOBUF_ALWAYS_INLINE uint8_t* WriteVarintPacked(int num, const T& r,
876 int size, uint8_t* ptr,
877 const E& encode) {
878 ptr = EnsureSpace(ptr);
879 ptr = WriteLengthDelim(num, size, ptr);
880 auto it = r.data();
881 auto end = it + r.size();
882 do {
883 ptr = EnsureSpace(ptr);
884 ptr = UnsafeVarint(encode(*it++), ptr);
885 } while (it < end);
886 return ptr;
887 }
888
889 static uint32_t Encode32(uint32_t v) { return v; }
890 static uint64_t Encode64(uint64_t v) { return v; }
891 static uint32_t ZigZagEncode32(int32_t v) {
892 return (static_cast<uint32_t>(v) << 1) ^ static_cast<uint32_t>(v >> 31);
893 }
894 static uint64_t ZigZagEncode64(int64_t v) {
895 return (static_cast<uint64_t>(v) << 1) ^ static_cast<uint64_t>(v >> 63);
896 }
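  // For reference, ZigZag maps signed values of small magnitude to small
  // unsigned values so they encode compactly as varints:
  //   0 -> 0, -1 -> 1, 1 -> 2, -2 -> 3, 2 -> 4, ...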
897
898 template <typename T>
899 PROTOBUF_ALWAYS_INLINE static uint8_t* UnsafeVarint(T value, uint8_t* ptr) {
900 static_assert(std::is_unsigned<T>::value,
901 "Varint serialization must be unsigned");
902 ptr[0] = static_cast<uint8_t>(value);
903 if (value < 0x80) {
904 return ptr + 1;
905 }
906 // Turn on continuation bit in the byte we just wrote.
907 ptr[0] |= static_cast<uint8_t>(0x80);
908 value >>= 7;
909 ptr[1] = static_cast<uint8_t>(value);
910 if (value < 0x80) {
911 return ptr + 2;
912 }
913 ptr += 2;
914 do {
915 // Turn on continuation bit in the byte we just wrote.
916 ptr[-1] |= static_cast<uint8_t>(0x80);
917 value >>= 7;
918 *ptr = static_cast<uint8_t>(value);
919 ++ptr;
920 } while (value >= 0x80);
921 return ptr;
922 }
923
924 PROTOBUF_ALWAYS_INLINE static uint8_t* UnsafeWriteSize(uint32_t value,
925 uint8_t* ptr) {
926 while (PROTOBUF_PREDICT_FALSE(value >= 0x80)) {
927 *ptr = static_cast<uint8_t>(value | 0x80);
928 value >>= 7;
929 ++ptr;
930 }
931 *ptr++ = static_cast<uint8_t>(value);
932 return ptr;
933 }
934
935 template <int S>
936 uint8_t* WriteRawLittleEndian(const void* data, int size, uint8_t* ptr);
937#if !defined(PROTOBUF_LITTLE_ENDIAN) || \
938 defined(PROTOBUF_DISABLE_LITTLE_ENDIAN_OPT_FOR_TEST)
939 uint8_t* WriteRawLittleEndian32(const void* data, int size, uint8_t* ptr);
940 uint8_t* WriteRawLittleEndian64(const void* data, int size, uint8_t* ptr);
941#endif
942
  // These methods are for CodedOutputStream. Ideally they should be private,
  // but to match the current behavior of CodedOutputStream as closely as
  // possible, we expose some functionality to it.
946 public:
947 uint8_t* SetInitialBuffer(void* data, int size) {
948 auto ptr = static_cast<uint8_t*>(data);
949 if (size > kSlopBytes) {
950 end_ = ptr + size - kSlopBytes;
951 buffer_end_ = nullptr;
952 return ptr;
953 } else {
954 end_ = buffer_ + size;
955 buffer_end_ = ptr;
956 return buffer_;
957 }
958 }
959
960 private:
  // Needed by CodedOutputStream::HadError(), which needs to flush the patch
  // buffers to verify that no error has occurred so far.
963 uint8_t* FlushAndResetBuffer(uint8_t*);
964
  // The following functions mimic the old CodedOutputStream behavior as
  // closely as possible. They flush the current state to the stream, behave
  // like the old CodedOutputStream, and then return to normal operation.
968 bool Skip(int count, uint8_t** pp);
969 bool GetDirectBufferPointer(void** data, int* size, uint8_t** pp);
970 uint8_t* GetDirectBufferForNBytesAndAdvance(int size, uint8_t** pp);
971
972 friend class CodedOutputStream;
973};
974
975template <>
976inline uint8_t* EpsCopyOutputStream::WriteRawLittleEndian<1>(const void* data,
977 int size,
978 uint8_t* ptr) {
979 return WriteRaw(data, size, ptr);
980}
981template <>
982inline uint8_t* EpsCopyOutputStream::WriteRawLittleEndian<4>(const void* data,
983 int size,
984 uint8_t* ptr) {
985#if defined(PROTOBUF_LITTLE_ENDIAN) && \
986 !defined(PROTOBUF_DISABLE_LITTLE_ENDIAN_OPT_FOR_TEST)
987 return WriteRaw(data, size, ptr);
988#else
989 return WriteRawLittleEndian32(data, size, ptr);
990#endif
991}
992template <>
993inline uint8_t* EpsCopyOutputStream::WriteRawLittleEndian<8>(const void* data,
994 int size,
995 uint8_t* ptr) {
996#if defined(PROTOBUF_LITTLE_ENDIAN) && \
997 !defined(PROTOBUF_DISABLE_LITTLE_ENDIAN_OPT_FOR_TEST)
998 return WriteRaw(data, size, ptr);
999#else
1000 return WriteRawLittleEndian64(data, size, ptr);
1001#endif
1002}
1003
1004// Class which encodes and writes binary data which is composed of varint-
1005// encoded integers and fixed-width pieces. Wraps a ZeroCopyOutputStream.
1006// Most users will not need to deal with CodedOutputStream.
1007//
1008// Most methods of CodedOutputStream which return a bool return false if an
1009// underlying I/O error occurs. Once such a failure occurs, the
1010// CodedOutputStream is broken and is no longer useful. The Write* methods do
1011// not return the stream status, but will invalidate the stream if an error
1012// occurs. The client can probe HadError() to determine the status.
1013//
1014// Note that every method of CodedOutputStream which writes some data has
1015// a corresponding static "ToArray" version. These versions write directly
1016// to the provided buffer, returning a pointer past the last written byte.
1017// They require that the buffer has sufficient capacity for the encoded data.
1018// This allows an optimization where we check if an output stream has enough
1019// space for an entire message before we start writing and, if there is, we
1020// call only the ToArray methods to avoid doing bound checks for each
1021// individual value.
1022// i.e., in the example above:
1023//
1024// CodedOutputStream* coded_output = new CodedOutputStream(raw_output);
1025// int magic_number = 1234;
1026// char text[] = "Hello world!";
1027//
1028// int coded_size = sizeof(magic_number) +
1029// CodedOutputStream::VarintSize32(strlen(text)) +
1030// strlen(text);
1031//
1032// uint8_t* buffer =
1033// coded_output->GetDirectBufferForNBytesAndAdvance(coded_size);
1034// if (buffer != nullptr) {
1035// // The output stream has enough space in the buffer: write directly to
1036// // the array.
1037// buffer = CodedOutputStream::WriteLittleEndian32ToArray(magic_number,
1038// buffer);
1039// buffer = CodedOutputStream::WriteVarint32ToArray(strlen(text), buffer);
1040// buffer = CodedOutputStream::WriteRawToArray(text, strlen(text), buffer);
1041// } else {
1042// // Make bound-checked writes, which will ask the underlying stream for
1043// // more space as needed.
1044// coded_output->WriteLittleEndian32(magic_number);
1045// coded_output->WriteVarint32(strlen(text));
1046// coded_output->WriteRaw(text, strlen(text));
1047// }
1048//
1049// delete coded_output;
1050class PROTOBUF_EXPORT CodedOutputStream {
1051 public:
1052 // Creates a CodedOutputStream that writes to the given `stream`.
1053 // The provided stream must publicly derive from `ZeroCopyOutputStream`.
1054 template <class Stream, class = typename std::enable_if<std::is_base_of<
1055 ZeroCopyOutputStream, Stream>::value>::type>
1056 explicit CodedOutputStream(Stream* stream);
1057
1058 // Creates a CodedOutputStream that writes to the given `stream`, and does
1059 // an 'eager initialization' of the internal state if `eager_init` is true.
1060 // The provided stream must publicly derive from `ZeroCopyOutputStream`.
1061 template <class Stream, class = typename std::enable_if<std::is_base_of<
1062 ZeroCopyOutputStream, Stream>::value>::type>
1063 CodedOutputStream(Stream* stream, bool eager_init);
1064
1065 // Destroy the CodedOutputStream and position the underlying
1066 // ZeroCopyOutputStream immediately after the last byte written.
1067 ~CodedOutputStream();
1068
1069 // Returns true if there was an underlying I/O error since this object was
  // created. One should call Trim() before this function in order to catch
  // all errors.
1072 bool HadError() {
1073 cur_ = impl_.FlushAndResetBuffer(cur_);
1074 GOOGLE_DCHECK(cur_);
1075 return impl_.HadError();
1076 }
1077
1078 // Trims any unused space in the underlying buffer so that its size matches
1079 // the number of bytes written by this stream. The underlying buffer will
1080 // automatically be trimmed when this stream is destroyed; this call is only
1081 // necessary if the underlying buffer is accessed *before* the stream is
1082 // destroyed.
  void Trim() { cur_ = impl_.Trim(cur_); }
1084
1085 // Skips a number of bytes, leaving the bytes unmodified in the underlying
1086 // buffer. Returns false if an underlying write error occurs. This is
1087 // mainly useful with GetDirectBufferPointer().
  // A note of caution: the skipped bytes may contain uninitialized data. The
  // caller must make sure that the skipped bytes are properly initialized;
  // otherwise you might leak bytes from your heap.
  bool Skip(int count) { return impl_.Skip(count, &cur_); }
1092
1093 // Sets *data to point directly at the unwritten part of the
1094 // CodedOutputStream's underlying buffer, and *size to the size of that
1095 // buffer, but does not advance the stream's current position. This will
1096 // always either produce a non-empty buffer or return false. If the caller
1097 // writes any data to this buffer, it should then call Skip() to skip over
1098 // the consumed bytes. This may be useful for implementing external fast
1099 // serialization routines for types of data not covered by the
1100 // CodedOutputStream interface.
1101 bool GetDirectBufferPointer(void** data, int* size) {
    return impl_.GetDirectBufferPointer(data, size, &cur_);
1103 }
1104
1105 // If there are at least "size" bytes available in the current buffer,
1106 // returns a pointer directly into the buffer and advances over these bytes.
1107 // The caller may then write directly into this buffer (e.g. using the
1108 // *ToArray static methods) rather than go through CodedOutputStream. If
1109 // there are not enough bytes available, returns NULL. The return pointer is
1110 // invalidated as soon as any other non-const method of CodedOutputStream
1111 // is called.
1112 inline uint8_t* GetDirectBufferForNBytesAndAdvance(int size) {
    return impl_.GetDirectBufferForNBytesAndAdvance(size, &cur_);
1114 }
1115
1116 // Write raw bytes, copying them from the given buffer.
1117 void WriteRaw(const void* buffer, int size) {
    cur_ = impl_.WriteRaw(buffer, size, cur_);
1119 }
1120 // Like WriteRaw() but will try to write aliased data if aliasing is
1121 // turned on.
1122 void WriteRawMaybeAliased(const void* data, int size);
1123 // Like WriteRaw() but writing directly to the target array.
1124 // This is _not_ inlined, as the compiler often optimizes memcpy into inline
1125 // copy loops. Since this gets called by every field with string or bytes
1126 // type, inlining may lead to a significant amount of code bloat, with only a
1127 // minor performance gain.
1128 static uint8_t* WriteRawToArray(const void* buffer, int size,
1129 uint8_t* target);
1130
1131 // Equivalent to WriteRaw(str.data(), str.size()).
1132 void WriteString(const std::string& str);
1133 // Like WriteString() but writing directly to the target array.
1134 static uint8_t* WriteStringToArray(const std::string& str, uint8_t* target);
1135 // Write the varint-encoded size of str followed by str.
1136 static uint8_t* WriteStringWithSizeToArray(const std::string& str,
1137 uint8_t* target);
1138
1139
1140 // Write a 32-bit little-endian integer.
1141 void WriteLittleEndian32(uint32_t value) {
    cur_ = impl_.EnsureSpace(cur_);
    SetCur(WriteLittleEndian32ToArray(value, Cur()));
1144 }
1145 // Like WriteLittleEndian32() but writing directly to the target array.
1146 static uint8_t* WriteLittleEndian32ToArray(uint32_t value, uint8_t* target);
1147 // Write a 64-bit little-endian integer.
1148 void WriteLittleEndian64(uint64_t value) {
    cur_ = impl_.EnsureSpace(cur_);
    SetCur(WriteLittleEndian64ToArray(value, Cur()));
1151 }
1152 // Like WriteLittleEndian64() but writing directly to the target array.
1153 static uint8_t* WriteLittleEndian64ToArray(uint64_t value, uint8_t* target);
1154
1155 // Write an unsigned integer with Varint encoding. Writing a 32-bit value
1156 // is equivalent to casting it to uint64_t and writing it as a 64-bit value,
1157 // but may be more efficient.
1158 void WriteVarint32(uint32_t value);
1159 // Like WriteVarint32() but writing directly to the target array.
1160 static uint8_t* WriteVarint32ToArray(uint32_t value, uint8_t* target);
1161 // Like WriteVarint32() but writing directly to the target array, and with
1162 // the less common-case paths being out of line rather than inlined.
1163 static uint8_t* WriteVarint32ToArrayOutOfLine(uint32_t value,
1164 uint8_t* target);
1165 // Write an unsigned integer with Varint encoding.
1166 void WriteVarint64(uint64_t value);
1167 // Like WriteVarint64() but writing directly to the target array.
1168 static uint8_t* WriteVarint64ToArray(uint64_t value, uint8_t* target);
1169
1170 // Equivalent to WriteVarint32() except when the value is negative,
1171 // in which case it must be sign-extended to a full 10 bytes.
1172 void WriteVarint32SignExtended(int32_t value);
1173 // Like WriteVarint32SignExtended() but writing directly to the target array.
1174 static uint8_t* WriteVarint32SignExtendedToArray(int32_t value,
1175 uint8_t* target);
1176
1177 // This is identical to WriteVarint32(), but optimized for writing tags.
1178 // In particular, if the input is a compile-time constant, this method
1179 // compiles down to a couple instructions.
1180 // Always inline because otherwise the aforementioned optimization can't work,
1181 // but GCC by default doesn't want to inline this.
1182 void WriteTag(uint32_t value);
1183 // Like WriteTag() but writing directly to the target array.
1184 PROTOBUF_ALWAYS_INLINE
1185 static uint8_t* WriteTagToArray(uint32_t value, uint8_t* target);
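  //
  // For illustration (a tag is (field_number << 3) | wire_type; the names
  // below are placeholders, not part of this API): writing field number 1 as
  // a length-delimited field followed by its payload might look like
  //
  //   coded_output->WriteTag((1 << 3) | 2);      // wire type 2
  //   coded_output->WriteVarint32(payload_size);
  //   coded_output->WriteRaw(payload, payload_size);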
1186
1187 // Returns the number of bytes needed to encode the given value as a varint.
1188 static size_t VarintSize32(uint32_t value);
1189 // Returns the number of bytes needed to encode the given value as a varint.
1190 static size_t VarintSize64(uint64_t value);
1191
1192 // If negative, 10 bytes. Otherwise, same as VarintSize32().
1193 static size_t VarintSize32SignExtended(int32_t value);
1194
1195 // Same as above, plus one. The additional one comes at no compute cost.
1196 static size_t VarintSize32PlusOne(uint32_t value);
1197 static size_t VarintSize64PlusOne(uint64_t value);
1198 static size_t VarintSize32SignExtendedPlusOne(int32_t value);
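  //
  // For illustration, the serialized size of a length-delimited field with
  // tag "tag" and "n" payload bytes (placeholder names) could be computed as
  //
  //   size_t total = CodedOutputStream::VarintSize32(tag) +
  //                  CodedOutputStream::VarintSize32(n) + n;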
1199
1200 // Compile-time equivalent of VarintSize32().
1201 template <uint32_t Value>
1202 struct StaticVarintSize32 {
1203 static const size_t value = (Value < (1 << 7)) ? 1
1204 : (Value < (1 << 14)) ? 2
1205 : (Value < (1 << 21)) ? 3
1206 : (Value < (1 << 28)) ? 4
1207 : 5;
1208 };
1209
1210 // Returns the total number of bytes written since this object was created.
1211 int ByteCount() const {
    return static_cast<int>(impl_.ByteCount(cur_) - start_count_);
1213 }
1214
1215 // Instructs the CodedOutputStream to allow the underlying
1216 // ZeroCopyOutputStream to hold pointers to the original structure instead of
1217 // copying, if it supports it (i.e. output->AllowsAliasing() is true). If the
  // underlying stream does not support aliasing, then enabling it has no
  // effect. For now, this only affects the behavior of
1220 // WriteRawMaybeAliased().
1221 //
  // NOTE: It is the caller's responsibility to ensure that the chunk of memory
1223 // remains live until all of the data has been consumed from the stream.
1224 void EnableAliasing(bool enabled) { impl_.EnableAliasing(enabled); }
1225
1226 // Indicate to the serializer whether the user wants deterministic
1227 // serialization. The default when this is not called comes from the global
1228 // default, controlled by SetDefaultSerializationDeterministic.
1229 //
1230 // What deterministic serialization means is entirely up to the driver of the
1231 // serialization process (i.e. the caller of methods like WriteVarint32). In
1232 // the case of serializing a proto buffer message using one of the methods of
  // MessageLite, this means that for a given binary, equal messages will
  // always be serialized to the same bytes. This implies:
1235 //
1236 // * Repeated serialization of a message will return the same bytes.
1237 //
1238 // * Different processes running the same binary (including on different
1239 // machines) will serialize equal messages to the same bytes.
1240 //
1241 // Note that this is *not* canonical across languages. It is also unstable
1242 // across different builds with intervening message definition changes, due to
1243 // unknown fields. Users who need canonical serialization (e.g. persistent
1244 // storage in a canonical form, fingerprinting) should define their own
1245 // canonicalization specification and implement the serializer using
1246 // reflection APIs rather than relying on this API.
1247 void SetSerializationDeterministic(bool value) {
1248 impl_.SetSerializationDeterministic(value);
1249 }
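  //
  // For illustration, a minimal sketch (assuming "message" is a protobuf
  // message and "output" is a ZeroCopyOutputStream*):
  //
  //   CodedOutputStream coded_output(output);
  //   coded_output.SetSerializationDeterministic(true);
  //   message.SerializeToCodedStream(&coded_output);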
1250
1251 // Return whether the user wants deterministic serialization. See above.
1252 bool IsSerializationDeterministic() const {
1253 return impl_.IsSerializationDeterministic();
1254 }
1255
1256 static bool IsDefaultSerializationDeterministic() {
    return default_serialization_deterministic_.load(
               std::memory_order_relaxed) != 0;
1259 }
1260
1261 template <typename Func>
1262 void Serialize(const Func& func);
1263
1264 uint8_t* Cur() const { return cur_; }
1265 void SetCur(uint8_t* ptr) { cur_ = ptr; }
1266 EpsCopyOutputStream* EpsCopy() { return &impl_; }
1267
1268 private:
1269 template <class Stream>
1270 void InitEagerly(Stream* stream);
1271
1272 EpsCopyOutputStream impl_;
1273 uint8_t* cur_;
1274 int64_t start_count_;
1275 static std::atomic<bool> default_serialization_deterministic_;
1276
1277 // See above. Other projects may use "friend" to allow them to call this.
1278 // After SetDefaultSerializationDeterministic() completes, all protocol
1279 // buffer serializations will be deterministic by default. Thread safe.
1280 // However, the meaning of "after" is subtle here: to be safe, each thread
1281 // that wants deterministic serialization by default needs to call
1282 // SetDefaultSerializationDeterministic() or ensure on its own that another
1283 // thread has done so.
1284 friend void internal::MapTestForceDeterministic();
1285 static void SetDefaultSerializationDeterministic() {
    default_serialization_deterministic_.store(true,
                                               std::memory_order_relaxed);
1287 }
1288 // REQUIRES: value >= 0x80, and that (value & 7f) has been written to *target.
1289 static uint8_t* WriteVarint32ToArrayOutOfLineHelper(uint32_t value,
1290 uint8_t* target);
1291 GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(CodedOutputStream);
1292};

// inline methods ====================================================
// The vast majority of varints are only one byte. These inline
// methods optimize for that case.

inline bool CodedInputStream::ReadVarint32(uint32_t* value) {
  uint32_t v = 0;
  if (PROTOBUF_PREDICT_TRUE(buffer_ < buffer_end_)) {
    v = *buffer_;
    if (v < 0x80) {
      *value = v;
      Advance(1);
      return true;
    }
  }
  int64_t result = ReadVarint32Fallback(v);
  *value = static_cast<uint32_t>(result);
  return result >= 0;
}
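
// For illustration, decoding a varint from a flat buffer (an informal sketch;
// the values are made up for the example):
//   const uint8_t data[] = {0x96, 0x01};  // varint encoding of 150
//   CodedInputStream coded_input(data, sizeof(data));
//   uint32_t value = 0;
//   bool ok = coded_input.ReadVarint32(&value);  // ok == true, value == 150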

inline bool CodedInputStream::ReadVarint64(uint64_t* value) {
  if (PROTOBUF_PREDICT_TRUE(buffer_ < buffer_end_) && *buffer_ < 0x80) {
    *value = *buffer_;
    Advance(1);
    return true;
  }
  std::pair<uint64_t, bool> p = ReadVarint64Fallback();
  *value = p.first;
  return p.second;
}

inline bool CodedInputStream::ReadVarintSizeAsInt(int* value) {
  if (PROTOBUF_PREDICT_TRUE(buffer_ < buffer_end_)) {
    int v = *buffer_;
    if (v < 0x80) {
      *value = v;
      Advance(1);
      return true;
    }
  }
  *value = ReadVarintSizeAsIntFallback();
  return *value >= 0;
}

// static
inline const uint8_t* CodedInputStream::ReadLittleEndian32FromArray(
    const uint8_t* buffer, uint32_t* value) {
#if defined(PROTOBUF_LITTLE_ENDIAN) && \
    !defined(PROTOBUF_DISABLE_LITTLE_ENDIAN_OPT_FOR_TEST)
  memcpy(value, buffer, sizeof(*value));
  return buffer + sizeof(*value);
#else
  *value = (static_cast<uint32_t>(buffer[0])) |
           (static_cast<uint32_t>(buffer[1]) << 8) |
           (static_cast<uint32_t>(buffer[2]) << 16) |
           (static_cast<uint32_t>(buffer[3]) << 24);
  return buffer + sizeof(*value);
#endif
}
// static
inline const uint8_t* CodedInputStream::ReadLittleEndian64FromArray(
    const uint8_t* buffer, uint64_t* value) {
#if defined(PROTOBUF_LITTLE_ENDIAN) && \
    !defined(PROTOBUF_DISABLE_LITTLE_ENDIAN_OPT_FOR_TEST)
  memcpy(value, buffer, sizeof(*value));
  return buffer + sizeof(*value);
#else
  uint32_t part0 = (static_cast<uint32_t>(buffer[0])) |
                   (static_cast<uint32_t>(buffer[1]) << 8) |
                   (static_cast<uint32_t>(buffer[2]) << 16) |
                   (static_cast<uint32_t>(buffer[3]) << 24);
  uint32_t part1 = (static_cast<uint32_t>(buffer[4])) |
                   (static_cast<uint32_t>(buffer[5]) << 8) |
                   (static_cast<uint32_t>(buffer[6]) << 16) |
                   (static_cast<uint32_t>(buffer[7]) << 24);
  *value = static_cast<uint64_t>(part0) | (static_cast<uint64_t>(part1) << 32);
  return buffer + sizeof(*value);
#endif
}

inline bool CodedInputStream::ReadLittleEndian32(uint32_t* value) {
#if defined(PROTOBUF_LITTLE_ENDIAN) && \
    !defined(PROTOBUF_DISABLE_LITTLE_ENDIAN_OPT_FOR_TEST)
  if (PROTOBUF_PREDICT_TRUE(BufferSize() >= static_cast<int>(sizeof(*value)))) {
    buffer_ = ReadLittleEndian32FromArray(buffer_, value);
    return true;
  } else {
    return ReadLittleEndian32Fallback(value);
  }
#else
  return ReadLittleEndian32Fallback(value);
#endif
}

inline bool CodedInputStream::ReadLittleEndian64(uint64_t* value) {
#if defined(PROTOBUF_LITTLE_ENDIAN) && \
    !defined(PROTOBUF_DISABLE_LITTLE_ENDIAN_OPT_FOR_TEST)
  if (PROTOBUF_PREDICT_TRUE(BufferSize() >= static_cast<int>(sizeof(*value)))) {
    buffer_ = ReadLittleEndian64FromArray(buffer_, value);
    return true;
  } else {
    return ReadLittleEndian64Fallback(value);
  }
#else
  return ReadLittleEndian64Fallback(value);
#endif
}

inline uint32_t CodedInputStream::ReadTagNoLastTag() {
  uint32_t v = 0;
  if (PROTOBUF_PREDICT_TRUE(buffer_ < buffer_end_)) {
    v = *buffer_;
    if (v < 0x80) {
      Advance(1);
      return v;
    }
  }
  v = ReadTagFallback(v);
  return v;
}

inline std::pair<uint32_t, bool> CodedInputStream::ReadTagWithCutoffNoLastTag(
    uint32_t cutoff) {
  // In performance-sensitive code we can expect cutoff to be a compile-time
  // constant, and things like "cutoff >= kMax1ByteVarint" to be evaluated at
  // compile time.
  uint32_t first_byte_or_zero = 0;
  if (PROTOBUF_PREDICT_TRUE(buffer_ < buffer_end_)) {
    // Hot case: buffer_ non-empty, buffer_[0] in [1, 128).
    // TODO(gpike): Is it worth rearranging this? E.g., if the number of fields
    // is large enough then is it better to check for the two-byte case first?
    first_byte_or_zero = buffer_[0];
    if (static_cast<int8_t>(buffer_[0]) > 0) {
      const uint32_t kMax1ByteVarint = 0x7f;
      uint32_t tag = buffer_[0];
      Advance(1);
      return std::make_pair(tag, cutoff >= kMax1ByteVarint || tag <= cutoff);
    }
    // Other hot case: cutoff >= 0x80, buffer_ has at least two bytes available,
    // and tag is two bytes. The latter is tested by bitwise-and-not of the
    // first byte and the second byte.
    if (cutoff >= 0x80 && PROTOBUF_PREDICT_TRUE(buffer_ + 1 < buffer_end_) &&
        PROTOBUF_PREDICT_TRUE((buffer_[0] & ~buffer_[1]) >= 0x80)) {
      const uint32_t kMax2ByteVarint = (0x7f << 7) + 0x7f;
      uint32_t tag = (1u << 7) * buffer_[1] + (buffer_[0] - 0x80);
      Advance(2);
      // It might make sense to test for tag == 0 now, but it is so rare that
      // we don't bother. A varint-encoded 0 should be one byte unless the
      // encoder lost its mind. The second part of the return value of this
      // function is allowed to be either true or false if the tag is 0, so we
      // don't have to check for tag == 0. We may need to check whether it
      // exceeds cutoff.
      bool at_or_below_cutoff = cutoff >= kMax2ByteVarint || tag <= cutoff;
      return std::make_pair(tag, at_or_below_cutoff);
    }
  }
  // Slow path
  const uint32_t tag = ReadTagFallback(first_byte_or_zero);
  return std::make_pair(tag, static_cast<uint32_t>(tag - 1) < cutoff);
}
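
// Worked example for the two-byte fast path above (illustrative only): field
// number 16 with wire type 0 has tag (16 << 3) | 0 == 128, which is encoded
// as the bytes {0x80, 0x01}; the expression
// (1u << 7) * buffer_[1] + (buffer_[0] - 0x80) then reconstructs
// 128 * 1 + 0 == 128.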

inline bool CodedInputStream::LastTagWas(uint32_t expected) {
  return last_tag_ == expected;
}

inline bool CodedInputStream::ConsumedEntireMessage() {
  return legitimate_message_end_;
}

inline bool CodedInputStream::ExpectTag(uint32_t expected) {
  if (expected < (1 << 7)) {
    if (PROTOBUF_PREDICT_TRUE(buffer_ < buffer_end_) &&
        buffer_[0] == expected) {
      Advance(1);
      return true;
    } else {
      return false;
    }
  } else if (expected < (1 << 14)) {
    if (PROTOBUF_PREDICT_TRUE(BufferSize() >= 2) &&
        buffer_[0] == static_cast<uint8_t>(expected | 0x80) &&
        buffer_[1] == static_cast<uint8_t>(expected >> 7)) {
      Advance(2);
      return true;
    } else {
      return false;
    }
  } else {
    // Don't bother optimizing for larger values.
    return false;
  }
}
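
// A hedged usage sketch: a hand-written parser expecting a length-delimited
// field 1 next (tag (1 << 3) | 2 == 0x0A) might branch on ExpectTag:
//   if (coded_input->ExpectTag(0x0A)) {
//     // Field 1 follows; read its varint length and then its payload.
//   }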

inline const uint8_t* CodedInputStream::ExpectTagFromArray(
    const uint8_t* buffer, uint32_t expected) {
  if (expected < (1 << 7)) {
    if (buffer[0] == expected) {
      return buffer + 1;
    }
  } else if (expected < (1 << 14)) {
    if (buffer[0] == static_cast<uint8_t>(expected | 0x80) &&
        buffer[1] == static_cast<uint8_t>(expected >> 7)) {
      return buffer + 2;
    }
  }
  return nullptr;
}

inline void CodedInputStream::GetDirectBufferPointerInline(const void** data,
                                                           int* size) {
  *data = buffer_;
  *size = static_cast<int>(buffer_end_ - buffer_);
}

inline bool CodedInputStream::ExpectAtEnd() {
  // If we are at a limit we know no more bytes can be read. Otherwise, it's
  // hard to say without calling Refresh(), and we'd rather not do that.

  if (buffer_ == buffer_end_ && ((buffer_size_after_limit_ != 0) ||
                                 (total_bytes_read_ == current_limit_))) {
    last_tag_ = 0;                   // Pretend we called ReadTag()...
    legitimate_message_end_ = true;  // ... and it hit EOF.
    return true;
  } else {
    return false;
  }
}

inline int CodedInputStream::CurrentPosition() const {
  return total_bytes_read_ - (BufferSize() + buffer_size_after_limit_);
}

inline void CodedInputStream::Advance(int amount) { buffer_ += amount; }

inline void CodedInputStream::SetRecursionLimit(int limit) {
  recursion_budget_ += limit - recursion_limit_;
  recursion_limit_ = limit;
}

inline bool CodedInputStream::IncrementRecursionDepth() {
  --recursion_budget_;
  return recursion_budget_ >= 0;
}

inline void CodedInputStream::DecrementRecursionDepth() {
  if (recursion_budget_ < recursion_limit_) ++recursion_budget_;
}

inline void CodedInputStream::UnsafeDecrementRecursionDepth() {
  assert(recursion_budget_ < recursion_limit_);
  ++recursion_budget_;
}

inline void CodedInputStream::SetExtensionRegistry(const DescriptorPool* pool,
                                                   MessageFactory* factory) {
  extension_pool_ = pool;
  extension_factory_ = factory;
}

inline const DescriptorPool* CodedInputStream::GetExtensionPool() {
  return extension_pool_;
}

inline MessageFactory* CodedInputStream::GetExtensionFactory() {
  return extension_factory_;
}

inline int CodedInputStream::BufferSize() const {
  return static_cast<int>(buffer_end_ - buffer_);
}

inline CodedInputStream::CodedInputStream(ZeroCopyInputStream* input)
    : buffer_(nullptr),
      buffer_end_(nullptr),
      input_(input),
      total_bytes_read_(0),
      overflow_bytes_(0),
      last_tag_(0),
      legitimate_message_end_(false),
      aliasing_enabled_(false),
      current_limit_(std::numeric_limits<int32_t>::max()),
      buffer_size_after_limit_(0),
      total_bytes_limit_(kDefaultTotalBytesLimit),
      recursion_budget_(default_recursion_limit_),
      recursion_limit_(default_recursion_limit_),
      extension_pool_(nullptr),
      extension_factory_(nullptr) {
  // Eagerly Refresh() so buffer space is immediately available.
  Refresh();
}

inline CodedInputStream::CodedInputStream(const uint8_t* buffer, int size)
    : buffer_(buffer),
      buffer_end_(buffer + size),
      input_(nullptr),
      total_bytes_read_(size),
      overflow_bytes_(0),
      last_tag_(0),
      legitimate_message_end_(false),
      aliasing_enabled_(false),
      current_limit_(size),
      buffer_size_after_limit_(0),
      total_bytes_limit_(kDefaultTotalBytesLimit),
      recursion_budget_(default_recursion_limit_),
      recursion_limit_(default_recursion_limit_),
      extension_pool_(nullptr),
      extension_factory_(nullptr) {
  // Note that setting current_limit_ == size is important to prevent some
  // code paths from trying to access input_ and segfaulting.
}

inline bool CodedInputStream::IsFlat() const { return input_ == nullptr; }

inline bool CodedInputStream::Skip(int count) {
  if (count < 0) return false;  // security: count is often user-supplied

  const int original_buffer_size = BufferSize();

  if (count <= original_buffer_size) {
    // Just skipping within the current buffer. Easy.
    Advance(count);
    return true;
  }

  return SkipFallback(count, original_buffer_size);
}

template <class Stream, class>
inline CodedOutputStream::CodedOutputStream(Stream* stream)
    : impl_(stream, IsDefaultSerializationDeterministic(), &cur_),
      start_count_(stream->ByteCount()) {
  InitEagerly(stream);
}

template <class Stream, class>
inline CodedOutputStream::CodedOutputStream(Stream* stream, bool eager_init)
    : impl_(stream, IsDefaultSerializationDeterministic(), &cur_),
      start_count_(stream->ByteCount()) {
  if (eager_init) {
    InitEagerly(stream);
  }
}

template <class Stream>
inline void CodedOutputStream::InitEagerly(Stream* stream) {
  void* data;
  int size;
  if (PROTOBUF_PREDICT_TRUE(stream->Next(&data, &size) && size > 0)) {
    cur_ = impl_.SetInitialBuffer(data, size);
  }
}

inline uint8_t* CodedOutputStream::WriteVarint32ToArray(uint32_t value,
                                                        uint8_t* target) {
  return EpsCopyOutputStream::UnsafeVarint(value, target);
}

inline uint8_t* CodedOutputStream::WriteVarint32ToArrayOutOfLine(
    uint32_t value, uint8_t* target) {
  target[0] = static_cast<uint8_t>(value);
  if (value < 0x80) {
    return target + 1;
  } else {
    return WriteVarint32ToArrayOutOfLineHelper(value, target);
  }
}

inline uint8_t* CodedOutputStream::WriteVarint64ToArray(uint64_t value,
                                                        uint8_t* target) {
  return EpsCopyOutputStream::UnsafeVarint(value, target);
}

inline void CodedOutputStream::WriteVarint32SignExtended(int32_t value) {
  WriteVarint64(static_cast<uint64_t>(value));
}

inline uint8_t* CodedOutputStream::WriteVarint32SignExtendedToArray(
    int32_t value, uint8_t* target) {
  return WriteVarint64ToArray(static_cast<uint64_t>(value), target);
}

inline uint8_t* CodedOutputStream::WriteLittleEndian32ToArray(uint32_t value,
                                                              uint8_t* target) {
#if defined(PROTOBUF_LITTLE_ENDIAN) && \
    !defined(PROTOBUF_DISABLE_LITTLE_ENDIAN_OPT_FOR_TEST)
  memcpy(target, &value, sizeof(value));
#else
  target[0] = static_cast<uint8_t>(value);
  target[1] = static_cast<uint8_t>(value >> 8);
  target[2] = static_cast<uint8_t>(value >> 16);
  target[3] = static_cast<uint8_t>(value >> 24);
#endif
  return target + sizeof(value);
}

inline uint8_t* CodedOutputStream::WriteLittleEndian64ToArray(uint64_t value,
                                                              uint8_t* target) {
#if defined(PROTOBUF_LITTLE_ENDIAN) && \
    !defined(PROTOBUF_DISABLE_LITTLE_ENDIAN_OPT_FOR_TEST)
  memcpy(target, &value, sizeof(value));
#else
  uint32_t part0 = static_cast<uint32_t>(value);
  uint32_t part1 = static_cast<uint32_t>(value >> 32);

  target[0] = static_cast<uint8_t>(part0);
  target[1] = static_cast<uint8_t>(part0 >> 8);
  target[2] = static_cast<uint8_t>(part0 >> 16);
  target[3] = static_cast<uint8_t>(part0 >> 24);
  target[4] = static_cast<uint8_t>(part1);
  target[5] = static_cast<uint8_t>(part1 >> 8);
  target[6] = static_cast<uint8_t>(part1 >> 16);
  target[7] = static_cast<uint8_t>(part1 >> 24);
#endif
  return target + sizeof(value);
}
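
// For illustration, a little-endian round trip over a stack buffer (an
// informal sketch; the variable names are made up for the example):
//   uint8_t buf[4];
//   CodedOutputStream::WriteLittleEndian32ToArray(0x12345678u, buf);
//   // buf now holds {0x78, 0x56, 0x34, 0x12} regardless of host endianness.
//   uint32_t v = 0;
//   CodedInputStream::ReadLittleEndian32FromArray(buf, &v);  // v == 0x12345678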

inline void CodedOutputStream::WriteVarint32(uint32_t value) {
  cur_ = impl_.EnsureSpace(cur_);
  SetCur(WriteVarint32ToArray(value, Cur()));
}

inline void CodedOutputStream::WriteVarint64(uint64_t value) {
  cur_ = impl_.EnsureSpace(cur_);
  SetCur(WriteVarint64ToArray(value, Cur()));
}

inline void CodedOutputStream::WriteTag(uint32_t value) {
  WriteVarint32(value);
}

inline uint8_t* CodedOutputStream::WriteTagToArray(uint32_t value,
                                                   uint8_t* target) {
  return WriteVarint32ToArray(value, target);
}

inline size_t CodedOutputStream::VarintSize32(uint32_t value) {
  // This computes value == 0 ? 1 : floor(log2(value)) / 7 + 1
  // Use an explicit multiplication to implement the divide of
  // a number in the 1..31 range.
  // Explicit OR 0x1 to avoid calling Bits::Log2FloorNonZero(0), which is
  // undefined.
  uint32_t log2value = Bits::Log2FloorNonZero(value | 0x1);
  return static_cast<size_t>((log2value * 9 + 73) / 64);
}
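
// Spot-checking the formula above (an informal sanity check, not part of the
// API): for value in [1, 127], log2value <= 6 and (6 * 9 + 73) / 64 == 1
// byte; for value == 128, log2value == 7 and (7 * 9 + 73) / 64 == 2 bytes;
// for the largest uint32_t, log2value == 31 and (31 * 9 + 73) / 64 == 5
// bytes, matching floor(log2(value)) / 7 + 1 in every case.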

inline size_t CodedOutputStream::VarintSize32PlusOne(uint32_t value) {
  // Same as above, but one more.
  uint32_t log2value = Bits::Log2FloorNonZero(value | 0x1);
  return static_cast<size_t>((log2value * 9 + 73 + 64) / 64);
}

inline size_t CodedOutputStream::VarintSize64(uint64_t value) {
  // This computes value == 0 ? 1 : floor(log2(value)) / 7 + 1
  // Use an explicit multiplication to implement the divide of
  // a number in the 1..63 range.
  // Explicit OR 0x1 to avoid calling Bits::Log2FloorNonZero(0), which is
  // undefined.
  uint32_t log2value = Bits::Log2FloorNonZero64(value | 0x1);
  return static_cast<size_t>((log2value * 9 + 73) / 64);
}

inline size_t CodedOutputStream::VarintSize64PlusOne(uint64_t value) {
  // Same as above, but one more.
  uint32_t log2value = Bits::Log2FloorNonZero64(value | 0x1);
  return static_cast<size_t>((log2value * 9 + 73 + 64) / 64);
}

inline size_t CodedOutputStream::VarintSize32SignExtended(int32_t value) {
  return VarintSize64(static_cast<uint64_t>(int64_t{value}));
}

inline size_t CodedOutputStream::VarintSize32SignExtendedPlusOne(
    int32_t value) {
  return VarintSize64PlusOne(static_cast<uint64_t>(int64_t{value}));
}

inline void CodedOutputStream::WriteString(const std::string& str) {
  WriteRaw(str.data(), static_cast<int>(str.size()));
}

inline void CodedOutputStream::WriteRawMaybeAliased(const void* data,
                                                    int size) {
  cur_ = impl_.WriteRawMaybeAliased(data, size, cur_);
}

inline uint8_t* CodedOutputStream::WriteRawToArray(const void* data, int size,
                                                   uint8_t* target) {
  memcpy(target, data, size);
  return target + size;
}

inline uint8_t* CodedOutputStream::WriteStringToArray(const std::string& str,
                                                      uint8_t* target) {
  return WriteRawToArray(str.data(), static_cast<int>(str.size()), target);
}

}  // namespace io
}  // namespace protobuf
}  // namespace google

#if defined(_MSC_VER) && _MSC_VER >= 1300 && !defined(__INTEL_COMPILER)
#pragma runtime_checks("c", restore)
#endif  // _MSC_VER && !defined(__INTEL_COMPILER)

#include <google/protobuf/port_undef.inc>

#endif  // GOOGLE_PROTOBUF_IO_CODED_STREAM_H__