您最多选择25个标签 标签必须以中文、字母或数字开头,可以包含连字符 (-),并且长度不得超过35个字符

parse_context.h 39 kB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001100210031004100510061007100810091010101110121013101410151016101710181019102010211022102310241025
  1. // Protocol Buffers - Google's data interchange format
  2. // Copyright 2008 Google Inc. All rights reserved.
  3. // https://developers.google.com/protocol-buffers/
  4. //
  5. // Redistribution and use in source and binary forms, with or without
  6. // modification, are permitted provided that the following conditions are
  7. // met:
  8. //
  9. // * Redistributions of source code must retain the above copyright
  10. // notice, this list of conditions and the following disclaimer.
  11. // * Redistributions in binary form must reproduce the above
  12. // copyright notice, this list of conditions and the following disclaimer
  13. // in the documentation and/or other materials provided with the
  14. // distribution.
  15. // * Neither the name of Google Inc. nor the names of its
  16. // contributors may be used to endorse or promote products derived from
  17. // this software without specific prior written permission.
  18. //
  19. // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  20. // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  21. // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  22. // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  23. // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  24. // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  25. // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  26. // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  27. // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  28. // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  29. // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  30. #ifndef GOOGLE_PROTOBUF_PARSE_CONTEXT_H__
  31. #define GOOGLE_PROTOBUF_PARSE_CONTEXT_H__
  32. #include <cstdint>
  33. #include <cstring>
  34. #include <string>
  35. #include <type_traits>
  36. #include <google/protobuf/io/coded_stream.h>
  37. #include <google/protobuf/io/zero_copy_stream.h>
  38. #include <google/protobuf/arena.h>
  39. #include <google/protobuf/port.h>
  40. #include <google/protobuf/stubs/strutil.h>
  41. #include <google/protobuf/arenastring.h>
  42. #include <google/protobuf/endian.h>
  43. #include <google/protobuf/implicit_weak_message.h>
  44. #include <google/protobuf/inlined_string_field.h>
  45. #include <google/protobuf/metadata_lite.h>
  46. #include <google/protobuf/repeated_field.h>
  47. #include <google/protobuf/wire_format_lite.h>
  48. // Must be included last.
  49. #include <google/protobuf/port_def.inc>
  50. namespace google {
  51. namespace protobuf {
  52. class UnknownFieldSet;
  53. class DescriptorPool;
  54. class MessageFactory;
  55. namespace internal {
  56. // Template code below needs to know about the existence of these functions.
  57. PROTOBUF_EXPORT void WriteVarint(uint32_t num, uint64_t val, std::string* s);
  58. PROTOBUF_EXPORT void WriteLengthDelimited(uint32_t num, StringPiece val,
  59. std::string* s);
  60. // Inline because it is just forwarding to s->WriteVarint
  61. inline void WriteVarint(uint32_t num, uint64_t val, UnknownFieldSet* s);
  62. inline void WriteLengthDelimited(uint32_t num, StringPiece val,
  63. UnknownFieldSet* s);
// The basic abstraction the parser is designed for is a slight modification
// of the ZeroCopyInputStream (ZCIS) abstraction. A ZCIS presents a serialized
// stream as a series of buffers that concatenate to the full stream.
// Pictorially a ZCIS presents a stream in chunks like so
// [---------------------------------------------------------------]
// [---------------------] chunk 1
//                      [----------------------------] chunk 2
//                                          chunk 3 [--------------]
//
// Where the '-' represent the bytes which are vertically lined up with the
// bytes of the stream. The proto parser requires its input to be presented
// similarly with the extra
// property that each chunk has kSlopBytes past its end that overlaps with the
// first kSlopBytes of the next chunk, or if there is no next chunk at least its
// still valid to read those bytes. Again, pictorially, we now have
//
// [---------------------------------------------------------------]
// [-------------------....] chunk 1
//                  [------------------------....] chunk 2
//                                   chunk 3 [------------------..**]
//                                                      chunk 4 [--****]
// Here '-' mean the bytes of the stream or chunk and '.' means bytes past the
// chunk that match up with the start of the next chunk. Above each chunk has
// 4 '.' after the chunk. In the case these 'overflow' bytes represent bytes
// past the stream, indicated by '*' above, their values are unspecified. It is
// still legal to read them (ie. should not segfault). Reading past the
// end should be detected by the user and indicated as an error.
//
// The reason for this, admittedly, unconventional invariant is to ruthlessly
// optimize the protobuf parser. Having an overlap helps in two important ways.
// Firstly it alleviates having to perform bounds checks if a piece of code
// is guaranteed to not read more than kSlopBytes. Secondly, and more
// importantly, the protobuf wireformat is such that reading a key/value pair is
// always less than 16 bytes. This removes the need to change to the next buffer
// in the middle of reading primitive values. Hence there is no need to store
// and load the current position.
  100. class PROTOBUF_EXPORT EpsCopyInputStream {
  101. public:
  102. enum { kSlopBytes = 16, kMaxCordBytesToCopy = 512 };
  103. explicit EpsCopyInputStream(bool enable_aliasing)
  104. : aliasing_(enable_aliasing ? kOnPatch : kNoAliasing) {}
  105. void BackUp(const char* ptr) {
  106. GOOGLE_DCHECK(ptr <= buffer_end_ + kSlopBytes);
  107. int count;
  108. if (next_chunk_ == buffer_) {
  109. count = static_cast<int>(buffer_end_ + kSlopBytes - ptr);
  110. } else {
  111. count = size_ + static_cast<int>(buffer_end_ - ptr);
  112. }
  113. if (count > 0) StreamBackUp(count);
  114. }
  115. // If return value is negative it's an error
  116. PROTOBUF_NODISCARD int PushLimit(const char* ptr, int limit) {
  117. GOOGLE_DCHECK(limit >= 0 && limit <= INT_MAX - kSlopBytes);
  118. // This add is safe due to the invariant above, because
  119. // ptr - buffer_end_ <= kSlopBytes.
  120. limit += static_cast<int>(ptr - buffer_end_);
  121. limit_end_ = buffer_end_ + (std::min)(0, limit);
  122. auto old_limit = limit_;
  123. limit_ = limit;
  124. return old_limit - limit;
  125. }
  126. PROTOBUF_NODISCARD bool PopLimit(int delta) {
  127. if (PROTOBUF_PREDICT_FALSE(!EndedAtLimit())) return false;
  128. limit_ = limit_ + delta;
  129. // TODO(gerbens) We could remove this line and hoist the code to
  130. // DoneFallback. Study the perf/bin-size effects.
  131. limit_end_ = buffer_end_ + (std::min)(0, limit_);
  132. return true;
  133. }
  134. PROTOBUF_NODISCARD const char* Skip(const char* ptr, int size) {
  135. if (size <= buffer_end_ + kSlopBytes - ptr) {
  136. return ptr + size;
  137. }
  138. return SkipFallback(ptr, size);
  139. }
  140. PROTOBUF_NODISCARD const char* ReadString(const char* ptr, int size,
  141. std::string* s) {
  142. if (size <= buffer_end_ + kSlopBytes - ptr) {
  143. s->assign(ptr, size);
  144. return ptr + size;
  145. }
  146. return ReadStringFallback(ptr, size, s);
  147. }
  148. PROTOBUF_NODISCARD const char* AppendString(const char* ptr, int size,
  149. std::string* s) {
  150. if (size <= buffer_end_ + kSlopBytes - ptr) {
  151. s->append(ptr, size);
  152. return ptr + size;
  153. }
  154. return AppendStringFallback(ptr, size, s);
  155. }
  156. // Implemented in arenastring.cc
  157. PROTOBUF_NODISCARD const char* ReadArenaString(const char* ptr,
  158. ArenaStringPtr* s,
  159. Arena* arena);
  160. template <typename Tag, typename T>
  161. PROTOBUF_NODISCARD const char* ReadRepeatedFixed(const char* ptr,
  162. Tag expected_tag,
  163. RepeatedField<T>* out);
  164. template <typename T>
  165. PROTOBUF_NODISCARD const char* ReadPackedFixed(const char* ptr, int size,
  166. RepeatedField<T>* out);
  167. template <typename Add>
  168. PROTOBUF_NODISCARD const char* ReadPackedVarint(const char* ptr, Add add);
  169. uint32_t LastTag() const { return last_tag_minus_1_ + 1; }
  170. bool ConsumeEndGroup(uint32_t start_tag) {
  171. bool res = last_tag_minus_1_ == start_tag;
  172. last_tag_minus_1_ = 0;
  173. return res;
  174. }
  175. bool EndedAtLimit() const { return last_tag_minus_1_ == 0; }
  176. bool EndedAtEndOfStream() const { return last_tag_minus_1_ == 1; }
  177. void SetLastTag(uint32_t tag) { last_tag_minus_1_ = tag - 1; }
  178. void SetEndOfStream() { last_tag_minus_1_ = 1; }
  179. bool IsExceedingLimit(const char* ptr) {
  180. return ptr > limit_end_ &&
  181. (next_chunk_ == nullptr || ptr - buffer_end_ > limit_);
  182. }
  183. bool AliasingEnabled() const { return aliasing_ != kNoAliasing; }
  184. int BytesUntilLimit(const char* ptr) const {
  185. return limit_ + static_cast<int>(buffer_end_ - ptr);
  186. }
  187. // Returns true if more data is available, if false is returned one has to
  188. // call Done for further checks.
  189. bool DataAvailable(const char* ptr) { return ptr < limit_end_; }
  190. protected:
  191. // Returns true is limit (either an explicit limit or end of stream) is
  192. // reached. It aligns *ptr across buffer seams.
  193. // If limit is exceeded it returns true and ptr is set to null.
  194. bool DoneWithCheck(const char** ptr, int d) {
  195. GOOGLE_DCHECK(*ptr);
  196. if (PROTOBUF_PREDICT_TRUE(*ptr < limit_end_)) return false;
  197. int overrun = static_cast<int>(*ptr - buffer_end_);
  198. GOOGLE_DCHECK_LE(overrun, kSlopBytes); // Guaranteed by parse loop.
  199. if (overrun ==
  200. limit_) { // No need to flip buffers if we ended on a limit.
  201. // If we actually overrun the buffer and next_chunk_ is null. It means
  202. // the stream ended and we passed the stream end.
  203. if (overrun > 0 && next_chunk_ == nullptr) *ptr = nullptr;
  204. return true;
  205. }
  206. auto res = DoneFallback(overrun, d);
  207. *ptr = res.first;
  208. return res.second;
  209. }
  210. const char* InitFrom(StringPiece flat) {
  211. overall_limit_ = 0;
  212. if (flat.size() > kSlopBytes) {
  213. limit_ = kSlopBytes;
  214. limit_end_ = buffer_end_ = flat.data() + flat.size() - kSlopBytes;
  215. next_chunk_ = buffer_;
  216. if (aliasing_ == kOnPatch) aliasing_ = kNoDelta;
  217. return flat.data();
  218. } else {
  219. std::memcpy(buffer_, flat.data(), flat.size());
  220. limit_ = 0;
  221. limit_end_ = buffer_end_ = buffer_ + flat.size();
  222. next_chunk_ = nullptr;
  223. if (aliasing_ == kOnPatch) {
  224. aliasing_ = reinterpret_cast<std::uintptr_t>(flat.data()) -
  225. reinterpret_cast<std::uintptr_t>(buffer_);
  226. }
  227. return buffer_;
  228. }
  229. }
  230. const char* InitFrom(io::ZeroCopyInputStream* zcis);
  231. const char* InitFrom(io::ZeroCopyInputStream* zcis, int limit) {
  232. if (limit == -1) return InitFrom(zcis);
  233. overall_limit_ = limit;
  234. auto res = InitFrom(zcis);
  235. limit_ = limit - static_cast<int>(buffer_end_ - res);
  236. limit_end_ = buffer_end_ + (std::min)(0, limit_);
  237. return res;
  238. }
  239. private:
  240. const char* limit_end_; // buffer_end_ + min(limit_, 0)
  241. const char* buffer_end_;
  242. const char* next_chunk_;
  243. int size_;
  244. int limit_; // relative to buffer_end_;
  245. io::ZeroCopyInputStream* zcis_ = nullptr;
  246. char buffer_[2 * kSlopBytes] = {};
  247. enum { kNoAliasing = 0, kOnPatch = 1, kNoDelta = 2 };
  248. std::uintptr_t aliasing_ = kNoAliasing;
  249. // This variable is used to communicate how the parse ended, in order to
  250. // completely verify the parsed data. A wire-format parse can end because of
  251. // one of the following conditions:
  252. // 1) A parse can end on a pushed limit.
  253. // 2) A parse can end on End Of Stream (EOS).
  254. // 3) A parse can end on 0 tag (only valid for toplevel message).
  255. // 4) A parse can end on an end-group tag.
  256. // This variable should always be set to 0, which indicates case 1. If the
  257. // parse terminated due to EOS (case 2), it's set to 1. In case the parse
  258. // ended due to a terminating tag (case 3 and 4) it's set to (tag - 1).
  259. // This var doesn't really belong in EpsCopyInputStream and should be part of
  260. // the ParseContext, but case 2 is most easily and optimally implemented in
  261. // DoneFallback.
  262. uint32_t last_tag_minus_1_ = 0;
  263. int overall_limit_ = INT_MAX; // Overall limit independent of pushed limits.
  264. // Pretty random large number that seems like a safe allocation on most
  265. // systems. TODO(gerbens) do we need to set this as build flag?
  266. enum { kSafeStringSize = 50000000 };
  267. // Advances to next buffer chunk returns a pointer to the same logical place
  268. // in the stream as set by overrun. Overrun indicates the position in the slop
  269. // region the parse was left (0 <= overrun <= kSlopBytes). Returns true if at
  270. // limit, at which point the returned pointer maybe null if there was an
  271. // error. The invariant of this function is that it's guaranteed that
  272. // kSlopBytes bytes can be accessed from the returned ptr. This function might
  273. // advance more buffers than one in the underlying ZeroCopyInputStream.
  274. std::pair<const char*, bool> DoneFallback(int overrun, int depth);
  275. // Advances to the next buffer, at most one call to Next() on the underlying
  276. // ZeroCopyInputStream is made. This function DOES NOT match the returned
  277. // pointer to where in the slop region the parse ends, hence no overrun
  278. // parameter. This is useful for string operations where you always copy
  279. // to the end of the buffer (including the slop region).
  280. const char* Next();
  281. // overrun is the location in the slop region the stream currently is
  282. // (0 <= overrun <= kSlopBytes). To prevent flipping to the next buffer of
  283. // the ZeroCopyInputStream in the case the parse will end in the last
  284. // kSlopBytes of the current buffer. depth is the current depth of nested
  285. // groups (or negative if the use case does not need careful tracking).
  286. inline const char* NextBuffer(int overrun, int depth);
  287. const char* SkipFallback(const char* ptr, int size);
  288. const char* AppendStringFallback(const char* ptr, int size, std::string* str);
  289. const char* ReadStringFallback(const char* ptr, int size, std::string* str);
  290. bool StreamNext(const void** data) {
  291. bool res = zcis_->Next(data, &size_);
  292. if (res) overall_limit_ -= size_;
  293. return res;
  294. }
  295. void StreamBackUp(int count) {
  296. zcis_->BackUp(count);
  297. overall_limit_ += count;
  298. }
  299. template <typename A>
  300. const char* AppendSize(const char* ptr, int size, const A& append) {
  301. int chunk_size = buffer_end_ + kSlopBytes - ptr;
  302. do {
  303. GOOGLE_DCHECK(size > chunk_size);
  304. if (next_chunk_ == nullptr) return nullptr;
  305. append(ptr, chunk_size);
  306. ptr += chunk_size;
  307. size -= chunk_size;
  308. // TODO(gerbens) Next calls NextBuffer which generates buffers with
  309. // overlap and thus incurs cost of copying the slop regions. This is not
  310. // necessary for reading strings. We should just call Next buffers.
  311. if (limit_ <= kSlopBytes) return nullptr;
  312. ptr = Next();
  313. if (ptr == nullptr) return nullptr; // passed the limit
  314. ptr += kSlopBytes;
  315. chunk_size = buffer_end_ + kSlopBytes - ptr;
  316. } while (size > chunk_size);
  317. append(ptr, size);
  318. return ptr + size;
  319. }
  320. // AppendUntilEnd appends data until a limit (either a PushLimit or end of
  321. // stream. Normal payloads are from length delimited fields which have an
  322. // explicit size. Reading until limit only comes when the string takes
  323. // the place of a protobuf, ie RawMessage/StringRawMessage, lazy fields and
  324. // implicit weak messages. We keep these methods private and friend them.
  325. template <typename A>
  326. const char* AppendUntilEnd(const char* ptr, const A& append) {
  327. if (ptr - buffer_end_ > limit_) return nullptr;
  328. while (limit_ > kSlopBytes) {
  329. size_t chunk_size = buffer_end_ + kSlopBytes - ptr;
  330. append(ptr, chunk_size);
  331. ptr = Next();
  332. if (ptr == nullptr) return limit_end_;
  333. ptr += kSlopBytes;
  334. }
  335. auto end = buffer_end_ + limit_;
  336. GOOGLE_DCHECK(end >= ptr);
  337. append(ptr, end - ptr);
  338. return end;
  339. }
  340. PROTOBUF_NODISCARD const char* AppendString(const char* ptr,
  341. std::string* str) {
  342. return AppendUntilEnd(
  343. ptr, [str](const char* p, ptrdiff_t s) { str->append(p, s); });
  344. }
  345. friend class ImplicitWeakMessage;
  346. };
  347. using LazyEagerVerifyFnType = const char* (*)(const char* ptr,
  348. ParseContext* ctx);
  349. using LazyEagerVerifyFnRef = std::remove_pointer<LazyEagerVerifyFnType>::type&;
  350. // ParseContext holds all data that is global to the entire parse. Most
  351. // importantly it contains the input stream, but also recursion depth and also
  352. // stores the end group tag, in case a parser ended on a endgroup, to verify
  353. // matching start/end group tags.
  354. class PROTOBUF_EXPORT ParseContext : public EpsCopyInputStream {
  355. public:
  356. struct Data {
  357. const DescriptorPool* pool = nullptr;
  358. MessageFactory* factory = nullptr;
  359. Arena* arena = nullptr;
  360. };
  361. template <typename... T>
  362. ParseContext(int depth, bool aliasing, const char** start, T&&... args)
  363. : EpsCopyInputStream(aliasing), depth_(depth) {
  364. *start = InitFrom(std::forward<T>(args)...);
  365. }
  366. void TrackCorrectEnding() { group_depth_ = 0; }
  367. bool Done(const char** ptr) { return DoneWithCheck(ptr, group_depth_); }
  368. int depth() const { return depth_; }
  369. Data& data() { return data_; }
  370. const Data& data() const { return data_; }
  371. const char* ParseMessage(MessageLite* msg, const char* ptr);
  372. // Spawns a child parsing context that inherits key properties. New context
  373. // inherits the following:
  374. // --depth_, data_, check_required_fields_, lazy_parse_mode_
  375. // The spawned context always disables aliasing (different input).
  376. template <typename... T>
  377. ParseContext Spawn(const char** start, T&&... args) {
  378. ParseContext spawned(depth_, false, start, std::forward<T>(args)...);
  379. // Transfer key context states.
  380. spawned.data_ = data_;
  381. return spawned;
  382. }
  383. // This overload supports those few cases where ParseMessage is called
  384. // on a class that is not actually a proto message.
  385. // TODO(jorg): Eliminate this use case.
  386. template <typename T,
  387. typename std::enable_if<!std::is_base_of<MessageLite, T>::value,
  388. bool>::type = true>
  389. PROTOBUF_NODISCARD const char* ParseMessage(T* msg, const char* ptr);
  390. template <typename T>
  391. PROTOBUF_NODISCARD PROTOBUF_NDEBUG_INLINE const char* ParseGroup(
  392. T* msg, const char* ptr, uint32_t tag) {
  393. if (--depth_ < 0) return nullptr;
  394. group_depth_++;
  395. ptr = msg->_InternalParse(ptr, this);
  396. group_depth_--;
  397. depth_++;
  398. if (PROTOBUF_PREDICT_FALSE(!ConsumeEndGroup(tag))) return nullptr;
  399. return ptr;
  400. }
  401. private:
  402. // Out-of-line routine to save space in ParseContext::ParseMessage<T>
  403. // int old;
  404. // ptr = ReadSizeAndPushLimitAndDepth(ptr, &old)
  405. // is equivalent to:
  406. // int size = ReadSize(&ptr);
  407. // if (!ptr) return nullptr;
  408. // int old = PushLimit(ptr, size);
  409. // if (--depth_ < 0) return nullptr;
  410. PROTOBUF_NODISCARD const char* ReadSizeAndPushLimitAndDepth(const char* ptr,
  411. int* old_limit);
  412. // The context keeps an internal stack to keep track of the recursive
  413. // part of the parse state.
  414. // Current depth of the active parser, depth counts down.
  415. // This is used to limit recursion depth (to prevent overflow on malicious
  416. // data), but is also used to index in stack_ to store the current state.
  417. int depth_;
  418. // Unfortunately necessary for the fringe case of ending on 0 or end-group tag
  419. // in the last kSlopBytes of a ZeroCopyInputStream chunk.
  420. int group_depth_ = INT_MIN;
  421. Data data_;
  422. };
  423. template <uint32_t tag>
  424. bool ExpectTag(const char* ptr) {
  425. if (tag < 128) {
  426. return *ptr == static_cast<char>(tag);
  427. } else {
  428. static_assert(tag < 128 * 128, "We only expect tags for 1 or 2 bytes");
  429. char buf[2] = {static_cast<char>(tag | 0x80), static_cast<char>(tag >> 7)};
  430. return std::memcmp(ptr, buf, 2) == 0;
  431. }
  432. }
  433. template <int>
  434. struct EndianHelper;
  435. template <>
  436. struct EndianHelper<1> {
  437. static uint8_t Load(const void* p) { return *static_cast<const uint8_t*>(p); }
  438. };
  439. template <>
  440. struct EndianHelper<2> {
  441. static uint16_t Load(const void* p) {
  442. uint16_t tmp;
  443. std::memcpy(&tmp, p, 2);
  444. return little_endian::ToHost(tmp);
  445. }
  446. };
  447. template <>
  448. struct EndianHelper<4> {
  449. static uint32_t Load(const void* p) {
  450. uint32_t tmp;
  451. std::memcpy(&tmp, p, 4);
  452. return little_endian::ToHost(tmp);
  453. }
  454. };
  455. template <>
  456. struct EndianHelper<8> {
  457. static uint64_t Load(const void* p) {
  458. uint64_t tmp;
  459. std::memcpy(&tmp, p, 8);
  460. return little_endian::ToHost(tmp);
  461. }
  462. };
  463. template <typename T>
  464. T UnalignedLoad(const char* p) {
  465. auto tmp = EndianHelper<sizeof(T)>::Load(p);
  466. T res;
  467. memcpy(&res, &tmp, sizeof(T));
  468. return res;
  469. }
  470. PROTOBUF_EXPORT
  471. std::pair<const char*, uint32_t> VarintParseSlow32(const char* p, uint32_t res);
  472. PROTOBUF_EXPORT
  473. std::pair<const char*, uint64_t> VarintParseSlow64(const char* p, uint32_t res);
  474. inline const char* VarintParseSlow(const char* p, uint32_t res, uint32_t* out) {
  475. auto tmp = VarintParseSlow32(p, res);
  476. *out = tmp.second;
  477. return tmp.first;
  478. }
  479. inline const char* VarintParseSlow(const char* p, uint32_t res, uint64_t* out) {
  480. auto tmp = VarintParseSlow64(p, res);
  481. *out = tmp.second;
  482. return tmp.first;
  483. }
  484. template <typename T>
  485. PROTOBUF_NODISCARD const char* VarintParse(const char* p, T* out) {
  486. auto ptr = reinterpret_cast<const uint8_t*>(p);
  487. uint32_t res = ptr[0];
  488. if (!(res & 0x80)) {
  489. *out = res;
  490. return p + 1;
  491. }
  492. uint32_t byte = ptr[1];
  493. res += (byte - 1) << 7;
  494. if (!(byte & 0x80)) {
  495. *out = res;
  496. return p + 2;
  497. }
  498. return VarintParseSlow(p, res, out);
  499. }
  500. // Used for tags, could read up to 5 bytes which must be available.
  501. // Caller must ensure its safe to call.
  502. PROTOBUF_EXPORT
  503. std::pair<const char*, uint32_t> ReadTagFallback(const char* p, uint32_t res);
  504. // Same as ParseVarint but only accept 5 bytes at most.
  505. inline const char* ReadTag(const char* p, uint32_t* out,
  506. uint32_t /*max_tag*/ = 0) {
  507. uint32_t res = static_cast<uint8_t>(p[0]);
  508. if (res < 128) {
  509. *out = res;
  510. return p + 1;
  511. }
  512. uint32_t second = static_cast<uint8_t>(p[1]);
  513. res += (second - 1) << 7;
  514. if (second < 128) {
  515. *out = res;
  516. return p + 2;
  517. }
  518. auto tmp = ReadTagFallback(p, res);
  519. *out = tmp.second;
  520. return tmp.first;
  521. }
  522. // As above, but optimized to consume very few registers while still being fast,
  523. // ReadTagInlined is useful for callers that don't mind the extra code but would
  524. // like to avoid an extern function call causing spills into the stack.
  525. //
  526. // Two support routines for ReadTagInlined come first...
  527. template <class T>
  528. PROTOBUF_NODISCARD PROTOBUF_ALWAYS_INLINE constexpr T RotateLeft(
  529. T x, int s) noexcept {
  530. return static_cast<T>(x << (s & (std::numeric_limits<T>::digits - 1))) |
  531. static_cast<T>(x >> ((-s) & (std::numeric_limits<T>::digits - 1)));
  532. }
  533. PROTOBUF_NODISCARD inline PROTOBUF_ALWAYS_INLINE uint64_t
  534. RotRight7AndReplaceLowByte(uint64_t res, const char& byte) {
  535. #if defined(__x86_64__) && defined(__GNUC__)
  536. // This will only use one register for `res`.
  537. // `byte` comes as a reference to allow the compiler to generate code like:
  538. //
  539. // rorq $7, %rcx
  540. // movb 1(%rax), %cl
  541. //
  542. // which avoids loading the incoming bytes into a separate register first.
  543. asm("ror $7,%0\n\t"
  544. "movb %1,%b0"
  545. : "+r"(res)
  546. : "m"(byte));
  547. #else
  548. res = RotateLeft(res, -7);
  549. res = res & ~0xFF;
  550. res |= 0xFF & byte;
  551. #endif
  552. return res;
  553. };
  554. inline PROTOBUF_ALWAYS_INLINE
  555. const char* ReadTagInlined(const char* ptr, uint32_t* out) {
  556. uint64_t res = 0xFF & ptr[0];
  557. if (PROTOBUF_PREDICT_FALSE(res >= 128)) {
  558. res = RotRight7AndReplaceLowByte(res, ptr[1]);
  559. if (PROTOBUF_PREDICT_FALSE(res & 0x80)) {
  560. res = RotRight7AndReplaceLowByte(res, ptr[2]);
  561. if (PROTOBUF_PREDICT_FALSE(res & 0x80)) {
  562. res = RotRight7AndReplaceLowByte(res, ptr[3]);
  563. if (PROTOBUF_PREDICT_FALSE(res & 0x80)) {
  564. // Note: this wouldn't work if res were 32-bit,
  565. // because then replacing the low byte would overwrite
  566. // the bottom 4 bits of the result.
  567. res = RotRight7AndReplaceLowByte(res, ptr[4]);
  568. if (PROTOBUF_PREDICT_FALSE(res & 0x80)) {
  569. // The proto format does not permit longer than 5-byte encodings for
  570. // tags.
  571. *out = 0;
  572. return nullptr;
  573. }
  574. *out = static_cast<uint32_t>(RotateLeft(res, 28));
  575. #if defined(__GNUC__)
  576. // Note: this asm statement prevents the compiler from
  577. // trying to share the "return ptr + constant" among all
  578. // branches.
  579. asm("" : "+r"(ptr));
  580. #endif
  581. return ptr + 5;
  582. }
  583. *out = static_cast<uint32_t>(RotateLeft(res, 21));
  584. return ptr + 4;
  585. }
  586. *out = static_cast<uint32_t>(RotateLeft(res, 14));
  587. return ptr + 3;
  588. }
  589. *out = static_cast<uint32_t>(RotateLeft(res, 7));
  590. return ptr + 2;
  591. }
  592. *out = static_cast<uint32_t>(res);
  593. return ptr + 1;
  594. }
  595. // Decode 2 consecutive bytes of a varint and returns the value, shifted left
  596. // by 1. It simultaneous updates *ptr to *ptr + 1 or *ptr + 2 depending if the
  597. // first byte's continuation bit is set.
  598. // If bit 15 of return value is set (equivalent to the continuation bits of both
  599. // bytes being set) the varint continues, otherwise the parse is done. On x86
  600. // movsx eax, dil
  601. // and edi, eax
  602. // add eax, edi
  603. // adc [rsi], 1
  604. inline uint32_t DecodeTwoBytes(const char** ptr) {
  605. uint32_t value = UnalignedLoad<uint16_t>(*ptr);
  606. // Sign extend the low byte continuation bit
  607. uint32_t x = static_cast<int8_t>(value);
  608. value &= x; // Mask out the high byte iff no continuation
  609. // This add is an amazing operation, it cancels the low byte continuation bit
  610. // from y transferring it to the carry. Simultaneously it also shifts the 7
  611. // LSB left by one tightly against high byte varint bits. Hence value now
  612. // contains the unpacked value shifted left by 1.
  613. value += x;
  614. // Use the carry to update the ptr appropriately.
  615. *ptr += value < x ? 2 : 1;
  616. return value;
  617. }
  618. // More efficient varint parsing for big varints
  619. inline const char* ParseBigVarint(const char* p, uint64_t* out) {
  620. auto pnew = p;
  621. auto tmp = DecodeTwoBytes(&pnew);
  622. uint64_t res = tmp >> 1;
  623. if (PROTOBUF_PREDICT_TRUE(static_cast<std::int16_t>(tmp) >= 0)) {
  624. *out = res;
  625. return pnew;
  626. }
  627. for (std::uint32_t i = 1; i < 5; i++) {
  628. pnew = p + 2 * i;
  629. tmp = DecodeTwoBytes(&pnew);
  630. res += (static_cast<std::uint64_t>(tmp) - 2) << (14 * i - 1);
  631. if (PROTOBUF_PREDICT_TRUE(static_cast<std::int16_t>(tmp) >= 0)) {
  632. *out = res;
  633. return pnew;
  634. }
  635. }
  636. return nullptr;
  637. }
  638. PROTOBUF_EXPORT
  639. std::pair<const char*, int32_t> ReadSizeFallback(const char* p, uint32_t first);
  640. // Used for tags, could read up to 5 bytes which must be available. Additionally
  641. // it makes sure the unsigned value fits a int32_t, otherwise returns nullptr.
  642. // Caller must ensure its safe to call.
  643. inline uint32_t ReadSize(const char** pp) {
  644. auto p = *pp;
  645. uint32_t res = static_cast<uint8_t>(p[0]);
  646. if (res < 128) {
  647. *pp = p + 1;
  648. return res;
  649. }
  650. auto x = ReadSizeFallback(p, res);
  651. *pp = x.first;
  652. return x.second;
  653. }
  654. // Some convenience functions to simplify the generated parse loop code.
  655. // Returning the value and updating the buffer pointer allows for nicer
  656. // function composition. We rely on the compiler to inline this.
  657. // Also in debug compiles having local scoped variables tend to generated
  658. // stack frames that scale as O(num fields).
  659. inline uint64_t ReadVarint64(const char** p) {
  660. uint64_t tmp;
  661. *p = VarintParse(*p, &tmp);
  662. return tmp;
  663. }
  664. inline uint32_t ReadVarint32(const char** p) {
  665. uint32_t tmp;
  666. *p = VarintParse(*p, &tmp);
  667. return tmp;
  668. }
  669. inline int64_t ReadVarintZigZag64(const char** p) {
  670. uint64_t tmp;
  671. *p = VarintParse(*p, &tmp);
  672. return WireFormatLite::ZigZagDecode64(tmp);
  673. }
  674. inline int32_t ReadVarintZigZag32(const char** p) {
  675. uint64_t tmp;
  676. *p = VarintParse(*p, &tmp);
  677. return WireFormatLite::ZigZagDecode32(static_cast<uint32_t>(tmp));
  678. }
// Parses a length-delimited sub-message into *msg. This overload is selected
// (via enable_if) for types that are NOT derived from MessageLite. Reads the
// size prefix, pushes a limit, then runs the message's _InternalParse.
template <typename T, typename std::enable_if<
                          !std::is_base_of<MessageLite, T>::value, bool>::type>
PROTOBUF_NODISCARD const char* ParseContext::ParseMessage(T* msg,
                                                          const char* ptr) {
  int old;
  // Returns nullptr on failure (bad size / limit / depth).
  ptr = ReadSizeAndPushLimitAndDepth(ptr, &old);
  // Skip the sub-parse if the prefix step failed; propagate nullptr.
  ptr = ptr ? msg->_InternalParse(ptr, this) : nullptr;
  // NOTE(review): this undoes a depth decrement presumably performed by
  // ReadSizeAndPushLimitAndDepth -- confirm against its definition.
  depth_++;
  // A failed PopLimit (limit mismatch) invalidates the whole parse.
  if (!PopLimit(old)) return nullptr;
  return ptr;
}
// Reads consecutive occurrences of a non-packed repeated fixed-size field for
// as long as the next tag equals `expected_tag`. Assumes sizeof(T) and
// sizeof(Tag) bytes are loadable at ptr; the slop region past limit_end_
// presumably guarantees this -- TODO confirm.
template <typename Tag, typename T>
const char* EpsCopyInputStream::ReadRepeatedFixed(const char* ptr,
                                                  Tag expected_tag,
                                                  RepeatedField<T>* out) {
  do {
    out->Add(UnalignedLoad<T>(ptr));
    ptr += sizeof(T);
    // Stop at the buffer boundary; the caller resumes from there.
    if (PROTOBUF_PREDICT_FALSE(ptr >= limit_end_)) return ptr;
    // `(ptr += sizeof(Tag))` both consumes the matched tag and yields a
    // non-null (truthy) pointer, so the loop continues.
  } while (UnalignedLoad<Tag>(ptr) == expected_tag && (ptr += sizeof(Tag)));
  return ptr;
}
// Add any of the following lines to debug which parse function is failing.
// GOOGLE_PROTOBUF_ASSERT_RETURN(predicate, ret) returns `ret` from the
// enclosing function when `predicate` is false; re-enable the commented-out
// lines to break into a debugger or log at the exact failure point.
#define GOOGLE_PROTOBUF_ASSERT_RETURN(predicate, ret) \
  if (!(predicate)) {                                 \
    /* ::raise(SIGINT); */                            \
    /* GOOGLE_LOG(ERROR) << "Parse failure"; */      \
    return ret;                                       \
  }

// Parser-specific variant: bail out of the parse with nullptr.
#define GOOGLE_PROTOBUF_PARSER_ASSERT(predicate) \
  GOOGLE_PROTOBUF_ASSERT_RETURN(predicate, nullptr)
// Reads `size` bytes of packed fixed-size elements into `out`, flipping to the
// next underlying buffer chunk whenever the packed run extends past the
// current one. Returns nullptr on malformed or truncated input.
template <typename T>
const char* EpsCopyInputStream::ReadPackedFixed(const char* ptr, int size,
                                                RepeatedField<T>* out) {
  GOOGLE_PROTOBUF_PARSER_ASSERT(ptr);
  // Bytes available in the current chunk, including the slop region.
  int nbytes = buffer_end_ + kSlopBytes - ptr;
  while (size > nbytes) {
    // Copy out all whole elements visible in this chunk.
    int num = nbytes / sizeof(T);
    int old_entries = out->size();
    out->Reserve(old_entries + num);
    int block_size = num * sizeof(T);
    auto dst = out->AddNAlreadyReserved(num);
#ifdef PROTOBUF_LITTLE_ENDIAN
    // Wire format is little-endian, so a bulk copy is correct here.
    std::memcpy(dst, ptr, block_size);
#else
    for (int i = 0; i < num; i++)
      dst[i] = UnalignedLoad<T>(ptr + i * sizeof(T));
#endif
    size -= block_size;
    // No further chunk available: the field claims more data than the stream
    // has left.
    if (limit_ <= kSlopBytes) return nullptr;
    ptr = Next();
    if (ptr == nullptr) return nullptr;
    // Re-position past the bytes of the (possibly partial) element that were
    // already visible in the previous chunk's slop region.
    ptr += kSlopBytes - (nbytes - block_size);
    nbytes = buffer_end_ + kSlopBytes - ptr;
  }
  // The remainder fits entirely in the current chunk.
  int num = size / sizeof(T);
  int old_entries = out->size();
  out->Reserve(old_entries + num);
  int block_size = num * sizeof(T);
  auto dst = out->AddNAlreadyReserved(num);
#ifdef PROTOBUF_LITTLE_ENDIAN
  std::memcpy(dst, ptr, block_size);
#else
  for (int i = 0; i < num; i++) dst[i] = UnalignedLoad<T>(ptr + i * sizeof(T));
#endif
  ptr += block_size;
  // A trailing partial element means the declared size was not a multiple of
  // sizeof(T): malformed input.
  if (size != block_size) return nullptr;
  return ptr;
}
  748. template <typename Add>
  749. const char* ReadPackedVarintArray(const char* ptr, const char* end, Add add) {
  750. while (ptr < end) {
  751. uint64_t varint;
  752. ptr = VarintParse(ptr, &varint);
  753. if (ptr == nullptr) return nullptr;
  754. add(varint);
  755. }
  756. return ptr;
  757. }
// Reads a length-prefixed packed varint field, invoking `add` for every
// decoded value, handling runs that straddle underlying buffer chunks.
template <typename Add>
const char* EpsCopyInputStream::ReadPackedVarint(const char* ptr, Add add) {
  int size = ReadSize(&ptr);
  GOOGLE_PROTOBUF_PARSER_ASSERT(ptr);
  int chunk_size = buffer_end_ - ptr;
  while (size > chunk_size) {
    // Parse up to buffer_end_; the final varint may overrun into the slop.
    ptr = ReadPackedVarintArray(ptr, buffer_end_, add);
    if (ptr == nullptr) return nullptr;
    int overrun = ptr - buffer_end_;
    GOOGLE_DCHECK(overrun >= 0 && overrun <= kSlopBytes);
    if (size - chunk_size <= kSlopBytes) {
      // The current buffer contains all the information needed, we don't need
      // to flip buffers. However we must parse from a buffer with enough space
      // so we are not prone to a buffer overflow.
      char buf[kSlopBytes + 10] = {};
      std::memcpy(buf, buffer_end_, kSlopBytes);
      GOOGLE_CHECK_LE(size - chunk_size, kSlopBytes);
      auto end = buf + (size - chunk_size);
      auto res = ReadPackedVarintArray(buf + overrun, end, add);
      // The parse must consume exactly the declared number of bytes.
      if (res == nullptr || res != end) return nullptr;
      return buffer_end_ + (res - buf);
    }
    size -= overrun + chunk_size;
    GOOGLE_DCHECK_GT(size, 0);
    // We must flip buffers
    if (limit_ <= kSlopBytes) return nullptr;
    ptr = Next();
    if (ptr == nullptr) return nullptr;
    // Skip the bytes of the varint already consumed out of the slop region.
    ptr += overrun;
    chunk_size = buffer_end_ - ptr;
  }
  // The whole remaining run lies inside the current chunk.
  auto end = ptr + size;
  ptr = ReadPackedVarintArray(ptr, end, add);
  return end == ptr ? ptr : nullptr;
}
// Helper for verification of utf8. `field_name` is used for diagnostics on
// failure.
PROTOBUF_EXPORT
bool VerifyUTF8(StringPiece s, const char* field_name);

// Convenience overload forwarding a std::string* to the StringPiece version.
inline bool VerifyUTF8(const std::string* s, const char* field_name) {
  return VerifyUTF8(*s, field_name);
}
// All the string parsers with or without UTF checking and for all CTypes.
// Reads a length-delimited string field into *s. NOTE(review): "greedy"
// presumably means it consumes the whole delimited payload eagerly -- confirm
// against the .cc implementation.
PROTOBUF_NODISCARD PROTOBUF_EXPORT const char* InlineGreedyStringParser(
    std::string* s, const char* ptr, ParseContext* ctx);
  802. template <typename T>
  803. PROTOBUF_NODISCARD const char* FieldParser(uint64_t tag, T& field_parser,
  804. const char* ptr, ParseContext* ctx) {
  805. uint32_t number = tag >> 3;
  806. GOOGLE_PROTOBUF_PARSER_ASSERT(number != 0);
  807. using WireType = internal::WireFormatLite::WireType;
  808. switch (tag & 7) {
  809. case WireType::WIRETYPE_VARINT: {
  810. uint64_t value;
  811. ptr = VarintParse(ptr, &value);
  812. GOOGLE_PROTOBUF_PARSER_ASSERT(ptr);
  813. field_parser.AddVarint(number, value);
  814. break;
  815. }
  816. case WireType::WIRETYPE_FIXED64: {
  817. uint64_t value = UnalignedLoad<uint64_t>(ptr);
  818. ptr += 8;
  819. field_parser.AddFixed64(number, value);
  820. break;
  821. }
  822. case WireType::WIRETYPE_LENGTH_DELIMITED: {
  823. ptr = field_parser.ParseLengthDelimited(number, ptr, ctx);
  824. GOOGLE_PROTOBUF_PARSER_ASSERT(ptr);
  825. break;
  826. }
  827. case WireType::WIRETYPE_START_GROUP: {
  828. ptr = field_parser.ParseGroup(number, ptr, ctx);
  829. GOOGLE_PROTOBUF_PARSER_ASSERT(ptr);
  830. break;
  831. }
  832. case WireType::WIRETYPE_END_GROUP: {
  833. GOOGLE_LOG(FATAL) << "Can't happen";
  834. break;
  835. }
  836. case WireType::WIRETYPE_FIXED32: {
  837. uint32_t value = UnalignedLoad<uint32_t>(ptr);
  838. ptr += 4;
  839. field_parser.AddFixed32(number, value);
  840. break;
  841. }
  842. default:
  843. return nullptr;
  844. }
  845. return ptr;
  846. }
  847. template <typename T>
  848. PROTOBUF_NODISCARD const char* WireFormatParser(T& field_parser,
  849. const char* ptr,
  850. ParseContext* ctx) {
  851. while (!ctx->Done(&ptr)) {
  852. uint32_t tag;
  853. ptr = ReadTag(ptr, &tag);
  854. GOOGLE_PROTOBUF_PARSER_ASSERT(ptr != nullptr);
  855. if (tag == 0 || (tag & 7) == 4) {
  856. ctx->SetLastTag(tag);
  857. return ptr;
  858. }
  859. ptr = FieldParser(tag, field_parser, ptr, ctx);
  860. GOOGLE_PROTOBUF_PARSER_ASSERT(ptr != nullptr);
  861. }
  862. return ptr;
  863. }
// The packed parsers parse repeated numeric primitives directly into the
// corresponding field.
// These are packed varints. NOTE(review): `object` presumably points at the
// target RepeatedField (as in the template overloads below) -- confirm.
PROTOBUF_NODISCARD PROTOBUF_EXPORT const char* PackedInt32Parser(
    void* object, const char* ptr, ParseContext* ctx);
PROTOBUF_NODISCARD PROTOBUF_EXPORT const char* PackedUInt32Parser(
    void* object, const char* ptr, ParseContext* ctx);
PROTOBUF_NODISCARD PROTOBUF_EXPORT const char* PackedInt64Parser(
    void* object, const char* ptr, ParseContext* ctx);
PROTOBUF_NODISCARD PROTOBUF_EXPORT const char* PackedUInt64Parser(
    void* object, const char* ptr, ParseContext* ctx);
PROTOBUF_NODISCARD PROTOBUF_EXPORT const char* PackedSInt32Parser(
    void* object, const char* ptr, ParseContext* ctx);
PROTOBUF_NODISCARD PROTOBUF_EXPORT const char* PackedSInt64Parser(
    void* object, const char* ptr, ParseContext* ctx);
PROTOBUF_NODISCARD PROTOBUF_EXPORT const char* PackedEnumParser(
    void* object, const char* ptr, ParseContext* ctx);
  881. template <typename T>
  882. PROTOBUF_NODISCARD const char* PackedEnumParser(void* object, const char* ptr,
  883. ParseContext* ctx,
  884. bool (*is_valid)(int),
  885. InternalMetadata* metadata,
  886. int field_num) {
  887. return ctx->ReadPackedVarint(
  888. ptr, [object, is_valid, metadata, field_num](uint64_t val) {
  889. if (is_valid(val)) {
  890. static_cast<RepeatedField<int>*>(object)->Add(val);
  891. } else {
  892. WriteVarint(field_num, val, metadata->mutable_unknown_fields<T>());
  893. }
  894. });
  895. }
  896. template <typename T>
  897. PROTOBUF_NODISCARD const char* PackedEnumParserArg(
  898. void* object, const char* ptr, ParseContext* ctx,
  899. bool (*is_valid)(const void*, int), const void* data,
  900. InternalMetadata* metadata, int field_num) {
  901. return ctx->ReadPackedVarint(
  902. ptr, [object, is_valid, data, metadata, field_num](uint64_t val) {
  903. if (is_valid(data, val)) {
  904. static_cast<RepeatedField<int>*>(object)->Add(val);
  905. } else {
  906. WriteVarint(field_num, val, metadata->mutable_unknown_fields<T>());
  907. }
  908. });
  909. }
// Packed bool (varint-encoded) elements.
PROTOBUF_NODISCARD PROTOBUF_EXPORT const char* PackedBoolParser(
    void* object, const char* ptr, ParseContext* ctx);
// Packed fixed-width elements (32/64-bit, signed and unsigned, float/double).
PROTOBUF_NODISCARD PROTOBUF_EXPORT const char* PackedFixed32Parser(
    void* object, const char* ptr, ParseContext* ctx);
PROTOBUF_NODISCARD PROTOBUF_EXPORT const char* PackedSFixed32Parser(
    void* object, const char* ptr, ParseContext* ctx);
PROTOBUF_NODISCARD PROTOBUF_EXPORT const char* PackedFixed64Parser(
    void* object, const char* ptr, ParseContext* ctx);
PROTOBUF_NODISCARD PROTOBUF_EXPORT const char* PackedSFixed64Parser(
    void* object, const char* ptr, ParseContext* ctx);
PROTOBUF_NODISCARD PROTOBUF_EXPORT const char* PackedFloatParser(
    void* object, const char* ptr, ParseContext* ctx);
PROTOBUF_NODISCARD PROTOBUF_EXPORT const char* PackedDoubleParser(
    void* object, const char* ptr, ParseContext* ctx);
// This is the only recursive parser.
PROTOBUF_NODISCARD PROTOBUF_EXPORT const char* UnknownGroupLiteParse(
    std::string* unknown, const char* ptr, ParseContext* ctx);
// This is a helper for UnknownGroupLiteParse but is actually also
// useful in the generated code. It uses overload on std::string* vs
// UnknownFieldSet* to make the generated code isomorphic between full and lite.
PROTOBUF_NODISCARD PROTOBUF_EXPORT const char* UnknownFieldParse(
    uint32_t tag, std::string* unknown, const char* ptr, ParseContext* ctx);
  932. } // namespace internal
  933. } // namespace protobuf
  934. } // namespace google
  935. #include <google/protobuf/port_undef.inc>
  936. #endif // GOOGLE_PROTOBUF_PARSE_CONTEXT_H__