You cannot select more than 25 topics. Topics must start with a Chinese character, a letter or a number, can include dashes ('-') and can be up to 35 characters long.

parse_context.h 53 kB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001100210031004100510061007100810091010101110121013101410151016101710181019102010211022102310241025102610271028102910301031103210331034103510361037103810391040104110421043104410451046104710481049105010511052105310541055105610571058105910601061106210631064106510661067106810691070107110721073107410751076107710781079108010811082108310841085108610871088108910901091109210931094109510961097109810991100110111021103110411051106110711081109111011111112111311141115111611171118111911201121112211231124112511261127112811291130113111321133113411351136113711381139114011411142114311441145114611471148114911501151115211531154115511561157115811591160116111621163116411651166116711681169117011711172117311741175117611771178117911801181118211831184118511861187118811891190119111921193119411951196119711981199
  1. // Protocol Buffers - Google's data interchange format
  2. // Copyright 2008 Google Inc. All rights reserved.
  3. // https://developers.google.com/protocol-buffers/
  4. //
  5. // Redistribution and use in source and binary forms, with or without
  6. // modification, are permitted provided that the following conditions are
  7. // met:
  8. //
  9. // * Redistributions of source code must retain the above copyright
  10. // notice, this list of conditions and the following disclaimer.
  11. // * Redistributions in binary form must reproduce the above
  12. // copyright notice, this list of conditions and the following disclaimer
  13. // in the documentation and/or other materials provided with the
  14. // distribution.
  15. // * Neither the name of Google Inc. nor the names of its
  16. // contributors may be used to endorse or promote products derived from
  17. // this software without specific prior written permission.
  18. //
  19. // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
  20. // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
  21. // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
  22. // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
  23. // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
  24. // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
  25. // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  26. // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  27. // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  28. // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
  29. // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  30. #ifndef GOOGLE_PROTOBUF_PARSE_CONTEXT_H__
  31. #define GOOGLE_PROTOBUF_PARSE_CONTEXT_H__
#include <climits>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <limits>
#include <string>
#include <type_traits>
#include <utility>
#include <google/protobuf/io/coded_stream.h>
#include <google/protobuf/io/zero_copy_stream.h>
#include <google/protobuf/arena.h>
#include <google/protobuf/port.h>
#include <google/protobuf/stubs/strutil.h>
#include <google/protobuf/arenastring.h>
#include <google/protobuf/endian.h>
#include <google/protobuf/implicit_weak_message.h>
#include <google/protobuf/inlined_string_field.h>
#include <google/protobuf/metadata_lite.h>
#include <google/protobuf/repeated_field.h>
#include <google/protobuf/wire_format_lite.h>
// Must be included last.
#include <google/protobuf/port_def.inc>
  50. namespace google
  51. {
  52. namespace protobuf
  53. {
  54. class UnknownFieldSet;
  55. class DescriptorPool;
  56. class MessageFactory;
  57. namespace internal
  58. {
  59. // Template code below needs to know about the existence of these functions.
  60. PROTOBUF_EXPORT void WriteVarint(uint32_t num, uint64_t val, std::string* s);
  61. PROTOBUF_EXPORT void WriteLengthDelimited(uint32_t num, StringPiece val, std::string* s);
  62. // Inline because it is just forwarding to s->WriteVarint
  63. inline void WriteVarint(uint32_t num, uint64_t val, UnknownFieldSet* s);
  64. inline void WriteLengthDelimited(uint32_t num, StringPiece val, UnknownFieldSet* s);
  65. // The basic abstraction the parser is designed for is a slight modification
  66. // of the ZeroCopyInputStream (ZCIS) abstraction. A ZCIS presents a serialized
  67. // stream as a series of buffers that concatenate to the full stream.
  68. // Pictorially a ZCIS presents a stream in chunks like so
  69. // [---------------------------------------------------------------]
  70. // [---------------------] chunk 1
  71. // [----------------------------] chunk 2
  72. // chunk 3 [--------------]
  73. //
  74. // Where the '-' represent the bytes which are vertically lined up with the
  75. // bytes of the stream. The proto parser requires its input to be presented
  76. // similarly with the extra
  77. // property that each chunk has kSlopBytes past its end that overlaps with the
  78. // first kSlopBytes of the next chunk, or if there is no next chunk at least its
  79. // still valid to read those bytes. Again, pictorially, we now have
  80. //
  81. // [---------------------------------------------------------------]
  82. // [-------------------....] chunk 1
  83. // [------------------------....] chunk 2
  84. // chunk 3 [------------------..**]
  85. // chunk 4 [--****]
  86. // Here '-' mean the bytes of the stream or chunk and '.' means bytes past the
  87. // chunk that match up with the start of the next chunk. Above each chunk has
  88. // 4 '.' after the chunk. In the case these 'overflow' bytes represents bytes
  89. // past the stream, indicated by '*' above, their values are unspecified. It is
  90. // still legal to read them (ie. should not segfault). Reading past the
  91. // end should be detected by the user and indicated as an error.
  92. //
  93. // The reason for this, admittedly, unconventional invariant is to ruthlessly
  94. // optimize the protobuf parser. Having an overlap helps in two important ways.
  95. // Firstly it alleviates having to performing bounds checks if a piece of code
  96. // is guaranteed to not read more than kSlopBytes. Secondly, and more
  97. // importantly, the protobuf wireformat is such that reading a key/value pair is
  98. // always less than 16 bytes. This removes the need to change to next buffer in
  99. // the middle of reading primitive values. Hence there is no need to store and
  100. // load the current position.
// Input abstraction implementing the overlapping-chunk invariant described in
// the file comment above: kSlopBytes bytes past the end of every chunk are
// always readable, which lets the parser read key/value pairs without
// per-field bounds checks.
class PROTOBUF_EXPORT EpsCopyInputStream
{
public:
  enum
  {
    // Number of overlap ("slop") bytes guaranteed readable past each chunk.
    kSlopBytes = 16,
    kMaxCordBytesToCopy = 512
  };

  // If `enable_aliasing` is true, parsed string data may alias the input
  // buffer instead of being copied (tracked via aliasing_ below).
  explicit EpsCopyInputStream(bool enable_aliasing) :
      aliasing_(enable_aliasing ? kOnPatch : kNoAliasing)
  {
  }

  // Returns the bytes from `ptr` to the end of the current chunk (including
  // slop accounting) to the underlying stream via StreamBackUp.
  void BackUp(const char* ptr)
  {
    GOOGLE_DCHECK(ptr <= buffer_end_ + kSlopBytes);
    int count;
    if (next_chunk_ == buffer_)
    {
      // The remaining data was patched into the internal buffer_ (see
      // InitFrom); everything up to buffer_end_ + kSlopBytes is unread.
      count = static_cast<int>(buffer_end_ + kSlopBytes - ptr);
    }
    else
    {
      count = size_ + static_cast<int>(buffer_end_ - ptr);
    }
    if (count > 0)
      StreamBackUp(count);
  }

  // Pushes a new parse limit of `limit` bytes counted from `ptr` and returns
  // the delta needed to restore the previous limit via PopLimit.
  // If return value is negative it's an error
  PROTOBUF_NODISCARD int PushLimit(const char* ptr, int limit)
  {
    GOOGLE_DCHECK(limit >= 0 && limit <= INT_MAX - kSlopBytes);
    // This add is safe due to the invariant above, because
    // ptr - buffer_end_ <= kSlopBytes.
    limit += static_cast<int>(ptr - buffer_end_);
    limit_end_ = buffer_end_ + (std::min)(0, limit);
    auto old_limit = limit_;
    limit_ = limit;
    return old_limit - limit;
  }

  // Restores the limit saved by the matching PushLimit.  Returns false when
  // the nested parse did not end exactly on its limit.
  PROTOBUF_NODISCARD bool PopLimit(int delta)
  {
    if (PROTOBUF_PREDICT_FALSE(!EndedAtLimit()))
      return false;
    limit_ = limit_ + delta;
    // TODO(gerbens) We could remove this line and hoist the code to
    // DoneFallback. Study the perf/bin-size effects.
    limit_end_ = buffer_end_ + (std::min)(0, limit_);
    return true;
  }

  // Skips `size` bytes.  Fast path when they fit inside the current chunk
  // (including slop); otherwise crosses buffers in SkipFallback.
  PROTOBUF_NODISCARD const char* Skip(const char* ptr, int size)
  {
    if (size <= buffer_end_ + kSlopBytes - ptr)
    {
      return ptr + size;
    }
    return SkipFallback(ptr, size);
  }

  // Reads `size` bytes into *s, replacing its contents.
  PROTOBUF_NODISCARD const char* ReadString(const char* ptr, int size, std::string* s)
  {
    if (size <= buffer_end_ + kSlopBytes - ptr)
    {
      s->assign(ptr, size);
      return ptr + size;
    }
    return ReadStringFallback(ptr, size, s);
  }

  // Appends `size` bytes to *s.
  PROTOBUF_NODISCARD const char* AppendString(const char* ptr, int size, std::string* s)
  {
    if (size <= buffer_end_ + kSlopBytes - ptr)
    {
      s->append(ptr, size);
      return ptr + size;
    }
    return AppendStringFallback(ptr, size, s);
  }

  // Implemented in arenastring.cc
  PROTOBUF_NODISCARD const char* ReadArenaString(const char* ptr, ArenaStringPtr* s, Arena* arena);

  // Reads repeated fixed-width values tagged with `expected_tag` into *out.
  template<typename Tag, typename T>
  PROTOBUF_NODISCARD const char* ReadRepeatedFixed(const char* ptr, Tag expected_tag, RepeatedField<T>* out);
  // Reads a length-delimited packed run of fixed-width values into *out.
  template<typename T>
  PROTOBUF_NODISCARD const char* ReadPackedFixed(const char* ptr, int size, RepeatedField<T>* out);
  // Reads a packed varint run, invoking `add` once per decoded value.
  template<typename Add>
  PROTOBUF_NODISCARD const char* ReadPackedVarint(const char* ptr, Add add);

  // Last terminating tag seen; see last_tag_minus_1_ below for the encoding.
  uint32_t LastTag() const
  {
    return last_tag_minus_1_ + 1;
  }
  // True iff the parse ended on the end-group tag matching `start_tag`.
  // Resets the end-state back to "ended at limit" either way.
  bool ConsumeEndGroup(uint32_t start_tag)
  {
    bool res = last_tag_minus_1_ == start_tag;
    last_tag_minus_1_ = 0;
    return res;
  }
  // True if the parse terminated on a pushed limit (case 1 below).
  bool EndedAtLimit() const
  {
    return last_tag_minus_1_ == 0;
  }
  // True if the parse terminated at end of stream (case 2 below).
  bool EndedAtEndOfStream() const
  {
    return last_tag_minus_1_ == 1;
  }
  // Records a terminating tag (0 or end-group) the parse stopped on.
  void SetLastTag(uint32_t tag)
  {
    last_tag_minus_1_ = tag - 1;
  }
  // Records that the parse stopped at end of stream.
  void SetEndOfStream()
  {
    last_tag_minus_1_ = 1;
  }
  // True if `ptr` has run past the active limit.
  bool IsExceedingLimit(const char* ptr)
  {
    return ptr > limit_end_ &&
           (next_chunk_ == nullptr || ptr - buffer_end_ > limit_);
  }
  bool AliasingEnabled() const
  {
    return aliasing_ != kNoAliasing;
  }
  // Bytes remaining before the active limit, measured from `ptr`.
  int BytesUntilLimit(const char* ptr) const
  {
    return limit_ + static_cast<int>(buffer_end_ - ptr);
  }
  // Returns true if more data is available, if false is returned one has to
  // call Done for further checks.
  bool DataAvailable(const char* ptr)
  {
    return ptr < limit_end_;
  }

protected:
  // Returns true is limit (either an explicit limit or end of stream) is
  // reached. It aligns *ptr across buffer seams.
  // If limit is exceeded it returns true and ptr is set to null.
  bool DoneWithCheck(const char** ptr, int d)
  {
    GOOGLE_DCHECK(*ptr);
    if (PROTOBUF_PREDICT_TRUE(*ptr < limit_end_))
      return false;
    int overrun = static_cast<int>(*ptr - buffer_end_);
    GOOGLE_DCHECK_LE(overrun, kSlopBytes); // Guaranteed by parse loop.
    if (overrun == limit_)
    { // No need to flip buffers if we ended on a limit.
      // If we actually overrun the buffer and next_chunk_ is null. It means
      // the stream ended and we passed the stream end.
      if (overrun > 0 && next_chunk_ == nullptr)
        *ptr = nullptr;
      return true;
    }
    auto res = DoneFallback(overrun, d);
    *ptr = res.first;
    return res.second;
  }

  // Initializes parsing from a flat array.  A tail of kSlopBytes is copied
  // into the internal patch buffer_ so the slop-byte invariant holds even at
  // the end of the array.
  const char* InitFrom(StringPiece flat)
  {
    overall_limit_ = 0;
    if (flat.size() > kSlopBytes)
    {
      limit_ = kSlopBytes;
      limit_end_ = buffer_end_ = flat.data() + flat.size() - kSlopBytes;
      next_chunk_ = buffer_;
      if (aliasing_ == kOnPatch)
        aliasing_ = kNoDelta;
      return flat.data();
    }
    else
    {
      std::memcpy(buffer_, flat.data(), flat.size());
      limit_ = 0;
      limit_end_ = buffer_end_ = buffer_ + flat.size();
      next_chunk_ = nullptr;
      if (aliasing_ == kOnPatch)
      {
        // Aliased strings must point into the caller's buffer, so record the
        // delta between the original data and the internal patch buffer.
        aliasing_ = reinterpret_cast<std::uintptr_t>(flat.data()) -
                    reinterpret_cast<std::uintptr_t>(buffer_);
      }
      return buffer_;
    }
  }

  const char* InitFrom(io::ZeroCopyInputStream* zcis);

  // As above, but additionally enforces an overall byte limit; a limit of -1
  // means "no limit".
  const char* InitFrom(io::ZeroCopyInputStream* zcis, int limit)
  {
    if (limit == -1)
      return InitFrom(zcis);
    overall_limit_ = limit;
    auto res = InitFrom(zcis);
    limit_ = limit - static_cast<int>(buffer_end_ - res);
    limit_end_ = buffer_end_ + (std::min)(0, limit_);
    return res;
  }

private:
  const char* limit_end_; // buffer_end_ + min(limit_, 0)
  const char* buffer_end_;
  const char* next_chunk_;
  int size_;  // Size of the current chunk as reported by StreamNext.
  int limit_; // relative to buffer_end_;
  io::ZeroCopyInputStream* zcis_ = nullptr;
  // Patch buffer used to stitch adjacent chunks (and array tails) together
  // so the kSlopBytes overlap invariant always holds.
  char buffer_[2 * kSlopBytes] = {};
  enum
  {
    kNoAliasing = 0,
    kOnPatch = 1,
    kNoDelta = 2
  };
  std::uintptr_t aliasing_ = kNoAliasing;
  // This variable is used to communicate how the parse ended, in order to
  // completely verify the parsed data. A wire-format parse can end because of
  // one of the following conditions:
  // 1) A parse can end on a pushed limit.
  // 2) A parse can end on End Of Stream (EOS).
  // 3) A parse can end on 0 tag (only valid for toplevel message).
  // 4) A parse can end on an end-group tag.
  // This variable should always be set to 0, which indicates case 1. If the
  // parse terminated due to EOS (case 2), it's set to 1. In case the parse
  // ended due to a terminating tag (case 3 and 4) it's set to (tag - 1).
  // This var doesn't really belong in EpsCopyInputStream and should be part of
  // the ParseContext, but case 2 is most easily and optimally implemented in
  // DoneFallback.
  uint32_t last_tag_minus_1_ = 0;
  int overall_limit_ = INT_MAX; // Overall limit independent of pushed limits.
  // Pretty random large number that seems like a safe allocation on most
  // systems. TODO(gerbens) do we need to set this as build flag?
  enum
  {
    kSafeStringSize = 50000000
  };
  // Advances to next buffer chunk returns a pointer to the same logical place
  // in the stream as set by overrun. Overrun indicates the position in the slop
  // region the parse was left (0 <= overrun <= kSlopBytes). Returns true if at
  // limit, at which point the returned pointer maybe null if there was an
  // error. The invariant of this function is that it's guaranteed that
  // kSlopBytes bytes can be accessed from the returned ptr. This function might
  // advance more buffers than one in the underlying ZeroCopyInputStream.
  std::pair<const char*, bool> DoneFallback(int overrun, int depth);
  // Advances to the next buffer, at most one call to Next() on the underlying
  // ZeroCopyInputStream is made. This function DOES NOT match the returned
  // pointer to where in the slop region the parse ends, hence no overrun
  // parameter. This is useful for string operations where you always copy
  // to the end of the buffer (including the slop region).
  const char* Next();
  // overrun is the location in the slop region the stream currently is
  // (0 <= overrun <= kSlopBytes). To prevent flipping to the next buffer of
  // the ZeroCopyInputStream in the case the parse will end in the last
  // kSlopBytes of the current buffer. depth is the current depth of nested
  // groups (or negative if the use case does not need careful tracking).
  inline const char* NextBuffer(int overrun, int depth);
  const char* SkipFallback(const char* ptr, int size);
  const char* AppendStringFallback(const char* ptr, int size, std::string* str);
  const char* ReadStringFallback(const char* ptr, int size, std::string* str);

  // Fetches the next chunk from the stream; deducts its size from the
  // overall limit on success.
  bool StreamNext(const void** data)
  {
    bool res = zcis_->Next(data, &size_);
    if (res)
      overall_limit_ -= size_;
    return res;
  }
  // Returns `count` bytes to the stream and credits them back to the
  // overall limit.
  void StreamBackUp(int count)
  {
    zcis_->BackUp(count);
    overall_limit_ += count;
  }

  // Copies `size` bytes starting at `ptr` into `append`, crossing as many
  // chunk boundaries as needed.  Returns the end position, or nullptr when
  // the stream or the current limit ends first.
  template<typename A>
  const char* AppendSize(const char* ptr, int size, const A& append)
  {
    int chunk_size = buffer_end_ + kSlopBytes - ptr;
    do
    {
      GOOGLE_DCHECK(size > chunk_size);
      if (next_chunk_ == nullptr)
        return nullptr;
      append(ptr, chunk_size);
      ptr += chunk_size;
      size -= chunk_size;
      // TODO(gerbens) Next calls NextBuffer which generates buffers with
      // overlap and thus incurs cost of copying the slop regions. This is not
      // necessary for reading strings. We should just call Next buffers.
      if (limit_ <= kSlopBytes)
        return nullptr;
      ptr = Next();
      if (ptr == nullptr)
        return nullptr; // passed the limit
      ptr += kSlopBytes;
      chunk_size = buffer_end_ + kSlopBytes - ptr;
    } while (size > chunk_size);
    append(ptr, size);
    return ptr + size;
  }

  // AppendUntilEnd appends data until a limit (either a PushLimit or end of
  // stream. Normal payloads are from length delimited fields which have an
  // explicit size. Reading until limit only comes when the string takes
  // the place of a protobuf, ie RawMessage/StringRawMessage, lazy fields and
  // implicit weak messages. We keep these methods private and friend them.
  template<typename A>
  const char* AppendUntilEnd(const char* ptr, const A& append)
  {
    if (ptr - buffer_end_ > limit_)
      return nullptr;
    while (limit_ > kSlopBytes)
    {
      size_t chunk_size = buffer_end_ + kSlopBytes - ptr;
      append(ptr, chunk_size);
      ptr = Next();
      if (ptr == nullptr)
        return limit_end_;
      ptr += kSlopBytes;
    }
    auto end = buffer_end_ + limit_;
    GOOGLE_DCHECK(end >= ptr);
    append(ptr, end - ptr);
    return end;
  }

  // Appends everything up to the active limit/end of stream into *str.
  PROTOBUF_NODISCARD const char* AppendString(const char* ptr, std::string* str)
  {
    return AppendUntilEnd(
        ptr, [str](const char* p, ptrdiff_t s)
        { str->append(p, s); }
    );
  }
  friend class ImplicitWeakMessage;
};
// Function-pointer type for verification hooks — presumably used to eagerly
// verify lazily-parsed fields (inferred from the name; confirm at call sites).
// NOTE(review): ParseContext is used here but defined below; it is presumably
// forward-declared in one of the included headers — verify.
using LazyEagerVerifyFnType = const char* (*)(const char* ptr, ParseContext* ctx);
using LazyEagerVerifyFnRef = std::remove_pointer<LazyEagerVerifyFnType>::type&;
  422. // ParseContext holds all data that is global to the entire parse. Most
  423. // importantly it contains the input stream, but also recursion depth and also
  424. // stores the end group tag, in case a parser ended on a endgroup, to verify
  425. // matching start/end group tags.
class PROTOBUF_EXPORT ParseContext : public EpsCopyInputStream
{
public:
  // Auxiliary objects available to every parser in this parse.
  struct Data
  {
    const DescriptorPool* pool = nullptr;
    MessageFactory* factory = nullptr;
    Arena* arena = nullptr;
  };

  // Initializes the context and the underlying stream: the trailing
  // arguments are forwarded to EpsCopyInputStream::InitFrom and *start
  // receives the initial parse position.
  template<typename... T>
  ParseContext(int depth, bool aliasing, const char** start, T&&... args) :
      EpsCopyInputStream(aliasing),
      depth_(depth)
  {
    *start = InitFrom(std::forward<T>(args)...);
  }

  // Enables group-depth tracking (group_depth_ defaults to INT_MIN below,
  // meaning "not tracked").
  void TrackCorrectEnding()
  {
    group_depth_ = 0;
  }

  // True when the parse reached its limit / end of stream; aligns *ptr
  // across buffer seams (see EpsCopyInputStream::DoneWithCheck).
  bool Done(const char** ptr)
  {
    return DoneWithCheck(ptr, group_depth_);
  }

  int depth() const
  {
    return depth_;
  }

  Data& data()
  {
    return data_;
  }
  const Data& data() const
  {
    return data_;
  }

  const char* ParseMessage(MessageLite* msg, const char* ptr);

  // Spawns a child parsing context that inherits key properties. New context
  // inherits the following:
  // --depth_, data_, check_required_fields_, lazy_parse_mode_
  // The spawned context always disables aliasing (different input).
  template<typename... T>
  ParseContext Spawn(const char** start, T&&... args)
  {
    ParseContext spawned(depth_, false, start, std::forward<T>(args)...);
    // Transfer key context states.
    spawned.data_ = data_;
    return spawned;
  }

  // This overload supports those few cases where ParseMessage is called
  // on a class that is not actually a proto message.
  // TODO(jorg): Eliminate this use case.
  template<typename T, typename std::enable_if<!std::is_base_of<MessageLite, T>::value, bool>::type = true>
  PROTOBUF_NODISCARD const char* ParseMessage(T* msg, const char* ptr);

  // Parses a group field: recurses into msg->_InternalParse and then checks
  // that the parse ended on the matching end-group tag.  Returns nullptr on
  // recursion-depth overflow or a mismatched/missing end-group tag.
  template<typename T>
  PROTOBUF_NODISCARD PROTOBUF_NDEBUG_INLINE const char* ParseGroup(
      T* msg, const char* ptr, uint32_t tag
  )
  {
    if (--depth_ < 0)
      return nullptr; // Recursion limit exceeded.
    group_depth_++;
    ptr = msg->_InternalParse(ptr, this);
    group_depth_--;
    depth_++;
    if (PROTOBUF_PREDICT_FALSE(!ConsumeEndGroup(tag)))
      return nullptr;
    return ptr;
  }

private:
  // Out-of-line routine to save space in ParseContext::ParseMessage<T>
  //   int old;
  //   ptr = ReadSizeAndPushLimitAndDepth(ptr, &old)
  // is equivalent to:
  //   int size = ReadSize(&ptr);
  //   if (!ptr) return nullptr;
  //   int old = PushLimit(ptr, size);
  //   if (--depth_ < 0) return nullptr;
  PROTOBUF_NODISCARD const char* ReadSizeAndPushLimitAndDepth(const char* ptr, int* old_limit);

  // The context keeps an internal stack to keep track of the recursive
  // part of the parse state.
  // Current depth of the active parser, depth counts down.
  // This is used to limit recursion depth (to prevent overflow on malicious
  // data), but is also used to index in stack_ to store the current state.
  int depth_;
  // Unfortunately necessary for the fringe case of ending on 0 or end-group tag
  // in the last kSlopBytes of a ZeroCopyInputStream chunk.
  int group_depth_ = INT_MIN;
  Data data_;
};
  516. template<uint32_t tag>
  517. bool ExpectTag(const char* ptr)
  518. {
  519. if (tag < 128)
  520. {
  521. return *ptr == static_cast<char>(tag);
  522. }
  523. else
  524. {
  525. static_assert(tag < 128 * 128, "We only expect tags for 1 or 2 bytes");
  526. char buf[2] = {static_cast<char>(tag | 0x80), static_cast<char>(tag >> 7)};
  527. return std::memcmp(ptr, buf, 2) == 0;
  528. }
  529. }
  530. template<int>
  531. struct EndianHelper;
  532. template<>
  533. struct EndianHelper<1>
  534. {
  535. static uint8_t Load(const void* p)
  536. {
  537. return *static_cast<const uint8_t*>(p);
  538. }
  539. };
  540. template<>
  541. struct EndianHelper<2>
  542. {
  543. static uint16_t Load(const void* p)
  544. {
  545. uint16_t tmp;
  546. std::memcpy(&tmp, p, 2);
  547. return little_endian::ToHost(tmp);
  548. }
  549. };
  550. template<>
  551. struct EndianHelper<4>
  552. {
  553. static uint32_t Load(const void* p)
  554. {
  555. uint32_t tmp;
  556. std::memcpy(&tmp, p, 4);
  557. return little_endian::ToHost(tmp);
  558. }
  559. };
  560. template<>
  561. struct EndianHelper<8>
  562. {
  563. static uint64_t Load(const void* p)
  564. {
  565. uint64_t tmp;
  566. std::memcpy(&tmp, p, 8);
  567. return little_endian::ToHost(tmp);
  568. }
  569. };
  570. template<typename T>
  571. T UnalignedLoad(const char* p)
  572. {
  573. auto tmp = EndianHelper<sizeof(T)>::Load(p);
  574. T res;
  575. memcpy(&res, &tmp, sizeof(T));
  576. return res;
  577. }
// Out-of-line slow paths used by VarintParse() below once its two-byte
// inline fast path fails; `res` carries the partially decoded value.
PROTOBUF_EXPORT
std::pair<const char*, uint32_t> VarintParseSlow32(const char* p, uint32_t res);
PROTOBUF_EXPORT
std::pair<const char*, uint64_t> VarintParseSlow64(const char* p, uint32_t res);
  582. inline const char* VarintParseSlow(const char* p, uint32_t res, uint32_t* out)
  583. {
  584. auto tmp = VarintParseSlow32(p, res);
  585. *out = tmp.second;
  586. return tmp.first;
  587. }
  588. inline const char* VarintParseSlow(const char* p, uint32_t res, uint64_t* out)
  589. {
  590. auto tmp = VarintParseSlow64(p, res);
  591. *out = tmp.second;
  592. return tmp.first;
  593. }
  594. template<typename T>
  595. PROTOBUF_NODISCARD const char* VarintParse(const char* p, T* out)
  596. {
  597. auto ptr = reinterpret_cast<const uint8_t*>(p);
  598. uint32_t res = ptr[0];
  599. if (!(res & 0x80))
  600. {
  601. *out = res;
  602. return p + 1;
  603. }
  604. uint32_t byte = ptr[1];
  605. res += (byte - 1) << 7;
  606. if (!(byte & 0x80))
  607. {
  608. *out = res;
  609. return p + 2;
  610. }
  611. return VarintParseSlow(p, res, out);
  612. }
  613. // Used for tags, could read up to 5 bytes which must be available.
  614. // Caller must ensure its safe to call.
  615. PROTOBUF_EXPORT
  616. std::pair<const char*, uint32_t> ReadTagFallback(const char* p, uint32_t res);
  617. // Same as ParseVarint but only accept 5 bytes at most.
  618. inline const char* ReadTag(const char* p, uint32_t* out, uint32_t /*max_tag*/ = 0)
  619. {
  620. uint32_t res = static_cast<uint8_t>(p[0]);
  621. if (res < 128)
  622. {
  623. *out = res;
  624. return p + 1;
  625. }
  626. uint32_t second = static_cast<uint8_t>(p[1]);
  627. res += (second - 1) << 7;
  628. if (second < 128)
  629. {
  630. *out = res;
  631. return p + 2;
  632. }
  633. auto tmp = ReadTagFallback(p, res);
  634. *out = tmp.second;
  635. return tmp.first;
  636. }
  637. // As above, but optimized to consume very few registers while still being fast,
  638. // ReadTagInlined is useful for callers that don't mind the extra code but would
  639. // like to avoid an extern function call causing spills into the stack.
  640. //
  641. // Two support routines for ReadTagInlined come first...
  642. template<class T>
  643. PROTOBUF_NODISCARD PROTOBUF_ALWAYS_INLINE constexpr T RotateLeft(
  644. T x, int s
  645. ) noexcept
  646. {
  647. return static_cast<T>(x << (s & (std::numeric_limits<T>::digits - 1))) |
  648. static_cast<T>(x >> ((-s) & (std::numeric_limits<T>::digits - 1)));
  649. }
  650. PROTOBUF_NODISCARD inline PROTOBUF_ALWAYS_INLINE uint64_t
  651. RotRight7AndReplaceLowByte(uint64_t res, const char& byte)
  652. {
  653. #if defined(__x86_64__) && defined(__GNUC__)
  654. // This will only use one register for `res`.
  655. // `byte` comes as a reference to allow the compiler to generate code like:
  656. //
  657. // rorq $7, %rcx
  658. // movb 1(%rax), %cl
  659. //
  660. // which avoids loading the incoming bytes into a separate register first.
  661. asm("ror $7,%0\n\t"
  662. "movb %1,%b0"
  663. : "+r"(res)
  664. : "m"(byte));
  665. #else
  666. res = RotateLeft(res, -7);
  667. res = res & ~0xFF;
  668. res |= 0xFF & byte;
  669. #endif
  670. return res;
  671. };
// Reads a tag varint of up to 5 bytes.  Each RotRight7AndReplaceLowByte call
// folds one more byte into `res`; the final RotateLeft realigns the
// accumulated 7-bit groups into the decoded value.  On a tag longer than
// 5 bytes, sets *out = 0 and returns nullptr.
inline PROTOBUF_ALWAYS_INLINE const char* ReadTagInlined(const char* ptr, uint32_t* out)
{
  uint64_t res = 0xFF & ptr[0];
  if (PROTOBUF_PREDICT_FALSE(res >= 128))
  {
    res = RotRight7AndReplaceLowByte(res, ptr[1]);
    if (PROTOBUF_PREDICT_FALSE(res & 0x80))
    {
      res = RotRight7AndReplaceLowByte(res, ptr[2]);
      if (PROTOBUF_PREDICT_FALSE(res & 0x80))
      {
        res = RotRight7AndReplaceLowByte(res, ptr[3]);
        if (PROTOBUF_PREDICT_FALSE(res & 0x80))
        {
          // Note: this wouldn't work if res were 32-bit,
          // because then replacing the low byte would overwrite
          // the bottom 4 bits of the result.
          res = RotRight7AndReplaceLowByte(res, ptr[4]);
          if (PROTOBUF_PREDICT_FALSE(res & 0x80))
          {
            // The proto format does not permit longer than 5-byte encodings for
            // tags.
            *out = 0;
            return nullptr;
          }
          *out = static_cast<uint32_t>(RotateLeft(res, 28));
#if defined(__GNUC__)
          // Note: this asm statement prevents the compiler from
          // trying to share the "return ptr + constant" among all
          // branches.
          asm(""
              : "+r"(ptr));
#endif
          return ptr + 5;
        }
        *out = static_cast<uint32_t>(RotateLeft(res, 21));
        return ptr + 4;
      }
      *out = static_cast<uint32_t>(RotateLeft(res, 14));
      return ptr + 3;
    }
    *out = static_cast<uint32_t>(RotateLeft(res, 7));
    return ptr + 2;
  }
  *out = static_cast<uint32_t>(res);
  return ptr + 1;
}
// Decode 2 consecutive bytes of a varint and return the value, shifted left
// by 1. It simultaneously updates *ptr to *ptr + 1 or *ptr + 2 depending on
// whether the first byte's continuation bit is set.
// If bit 15 of the return value is set (equivalent to the continuation bits
// of both bytes being set) the varint continues, otherwise the parse is
// done. On x86 this compiles down to just:
// movsx eax, dil
// and edi, eax
// add eax, edi
// adc [rsi], 1
inline uint32_t DecodeTwoBytes(const char** ptr)
{
    uint32_t value = UnalignedLoad<uint16_t>(*ptr);
    // Sign extend the low byte continuation bit
    uint32_t x = static_cast<int8_t>(value);
    value &= x; // Mask out the high byte iff no continuation
    // This add is an amazing operation, it cancels the low byte continuation bit
    // from x transferring it to the carry. Simultaneously it also shifts the 7
    // LSB left by one tightly against high byte varint bits. Hence value now
    // contains the unpacked value shifted left by 1.
    value += x;
    // Use the carry (value wrapped below x) to update the ptr appropriately.
    *ptr += value < x ? 2 : 1;
    return value;
}
// More efficient varint parsing for big varints. Consumes the input two
// bytes at a time via DecodeTwoBytes, accumulating 14 payload bits per
// iteration into *out. Returns the position just past the varint, or
// nullptr if the varint does not terminate within 10 bytes.
inline const char* ParseBigVarint(const char* p, uint64_t* out)
{
    auto pnew = p;
    auto tmp = DecodeTwoBytes(&pnew);
    // DecodeTwoBytes returns the payload shifted left by 1; undo that.
    uint64_t res = tmp >> 1;
    // A non-negative int16 means the second byte's continuation bit was
    // clear, so the varint ended within the first two bytes.
    if (PROTOBUF_PREDICT_TRUE(static_cast<std::int16_t>(tmp) >= 0))
    {
        *out = res;
        return pnew;
    }
    for (std::uint32_t i = 1; i < 5; i++)
    {
        pnew = p + 2 * i;
        tmp = DecodeTwoBytes(&pnew);
        // Fold in 14 more payload bits; the "- 2" cancels the
        // continuation-bit residue DecodeTwoBytes leaves in the low bits.
        res += (static_cast<std::uint64_t>(tmp) - 2) << (14 * i - 1);
        if (PROTOBUF_PREDICT_TRUE(static_cast<std::int16_t>(tmp) >= 0))
        {
            *out = res;
            return pnew;
        }
    }
    // Malformed: continuation bits still set after 10 bytes.
    return nullptr;
}
  767. PROTOBUF_EXPORT
  768. std::pair<const char*, int32_t> ReadSizeFallback(const char* p, uint32_t first);
  769. // Used for tags, could read up to 5 bytes which must be available. Additionally
  770. // it makes sure the unsigned value fits a int32_t, otherwise returns nullptr.
  771. // Caller must ensure its safe to call.
  772. inline uint32_t ReadSize(const char** pp)
  773. {
  774. auto p = *pp;
  775. uint32_t res = static_cast<uint8_t>(p[0]);
  776. if (res < 128)
  777. {
  778. *pp = p + 1;
  779. return res;
  780. }
  781. auto x = ReadSizeFallback(p, res);
  782. *pp = x.first;
  783. return x.second;
  784. }
// Some convenience functions to simplify the generated parse loop code.
// Returning the value and updating the buffer pointer allows for nicer
// function composition. We rely on the compiler to inline this.
// Also, in debug compiles, having locally scoped variables tends to generate
// stack frames that scale as O(num fields).
  790. inline uint64_t ReadVarint64(const char** p)
  791. {
  792. uint64_t tmp;
  793. *p = VarintParse(*p, &tmp);
  794. return tmp;
  795. }
  796. inline uint32_t ReadVarint32(const char** p)
  797. {
  798. uint32_t tmp;
  799. *p = VarintParse(*p, &tmp);
  800. return tmp;
  801. }
  802. inline int64_t ReadVarintZigZag64(const char** p)
  803. {
  804. uint64_t tmp;
  805. *p = VarintParse(*p, &tmp);
  806. return WireFormatLite::ZigZagDecode64(tmp);
  807. }
  808. inline int32_t ReadVarintZigZag32(const char** p)
  809. {
  810. uint64_t tmp;
  811. *p = VarintParse(*p, &tmp);
  812. return WireFormatLite::ZigZagDecode32(static_cast<uint32_t>(tmp));
  813. }
  814. template<typename T, typename std::enable_if<!std::is_base_of<MessageLite, T>::value, bool>::type>
  815. PROTOBUF_NODISCARD const char* ParseContext::ParseMessage(T* msg, const char* ptr)
  816. {
  817. int old;
  818. ptr = ReadSizeAndPushLimitAndDepth(ptr, &old);
  819. ptr = ptr ? msg->_InternalParse(ptr, this) : nullptr;
  820. depth_++;
  821. if (!PopLimit(old))
  822. return nullptr;
  823. return ptr;
  824. }
  825. template<typename Tag, typename T>
  826. const char* EpsCopyInputStream::ReadRepeatedFixed(const char* ptr, Tag expected_tag, RepeatedField<T>* out)
  827. {
  828. do
  829. {
  830. out->Add(UnalignedLoad<T>(ptr));
  831. ptr += sizeof(T);
  832. if (PROTOBUF_PREDICT_FALSE(ptr >= limit_end_))
  833. return ptr;
  834. } while (UnalignedLoad<Tag>(ptr) == expected_tag && (ptr += sizeof(Tag)));
  835. return ptr;
  836. }
// Add any of the following lines to debug which parse function is failing.
// GOOGLE_PROTOBUF_ASSERT_RETURN(predicate, ret): bail out of the enclosing
// function with `ret` when `predicate` is false. The commented-out
// statements can be re-enabled to trap (SIGINT) or log at the exact point
// of failure while debugging.
#define GOOGLE_PROTOBUF_ASSERT_RETURN(predicate, ret) \
    if (!(predicate)) \
    { \
        /* ::raise(SIGINT); */ \
        /* GOOGLE_LOG(ERROR) << "Parse failure"; */ \
        return ret; \
    }
// Common case: parse functions report failure by returning nullptr.
#define GOOGLE_PROTOBUF_PARSER_ASSERT(predicate) \
    GOOGLE_PROTOBUF_ASSERT_RETURN(predicate, nullptr)
// Parses `size` bytes of packed fixed-width values into out. Data may span
// internal buffer boundaries; in that case the loop drains the current
// buffer (slop region included) and flips to the next one via Next().
// Returns the position past the payload, or nullptr on failure: the stream
// ends before `size` bytes are available, or `size` is not a multiple of
// sizeof(T).
template<typename T>
const char* EpsCopyInputStream::ReadPackedFixed(const char* ptr, int size, RepeatedField<T>* out)
{
    GOOGLE_PROTOBUF_PARSER_ASSERT(ptr);
    // Bytes readable from ptr in the current buffer, slop included.
    int nbytes = buffer_end_ + kSlopBytes - ptr;
    while (size > nbytes)
    {
        // Bulk-copy the whole elements that fit in this buffer.
        int num = nbytes / sizeof(T);
        int old_entries = out->size();
        out->Reserve(old_entries + num);
        int block_size = num * sizeof(T);
        auto dst = out->AddNAlreadyReserved(num);
#ifdef PROTOBUF_LITTLE_ENDIAN
        std::memcpy(dst, ptr, block_size);
#else
        for (int i = 0; i < num; i++)
            dst[i] = UnalignedLoad<T>(ptr + i * sizeof(T));
#endif
        size -= block_size;
        // Not enough stream left to flip to another buffer.
        if (limit_ <= kSlopBytes)
            return nullptr;
        ptr = Next();
        if (ptr == nullptr)
            return nullptr;
        // Skip the bytes of a partially-consumed element that were already
        // read from the previous buffer's slop region.
        ptr += kSlopBytes - (nbytes - block_size);
        nbytes = buffer_end_ + kSlopBytes - ptr;
    }
    // The remainder fits entirely within the current buffer.
    int num = size / sizeof(T);
    int old_entries = out->size();
    out->Reserve(old_entries + num);
    int block_size = num * sizeof(T);
    auto dst = out->AddNAlreadyReserved(num);
#ifdef PROTOBUF_LITTLE_ENDIAN
    std::memcpy(dst, ptr, block_size);
#else
    for (int i = 0; i < num; i++)
        dst[i] = UnalignedLoad<T>(ptr + i * sizeof(T));
#endif
    ptr += block_size;
    // A trailing partial element means the declared size was corrupt.
    if (size != block_size)
        return nullptr;
    return ptr;
}
  890. template<typename Add>
  891. const char* ReadPackedVarintArray(const char* ptr, const char* end, Add add)
  892. {
  893. while (ptr < end)
  894. {
  895. uint64_t varint;
  896. ptr = VarintParse(ptr, &varint);
  897. if (ptr == nullptr)
  898. return nullptr;
  899. add(varint);
  900. }
  901. return ptr;
  902. }
// Parses a packed varint field: reads the byte-length prefix, then parses
// varints until that many bytes are consumed, feeding each value to `add`.
// Handles varints that straddle internal buffer boundaries. Returns the
// position just past the packed payload, or nullptr on malformed or
// truncated input.
template<typename Add>
const char* EpsCopyInputStream::ReadPackedVarint(const char* ptr, Add add)
{
    int size = ReadSize(&ptr);
    GOOGLE_PROTOBUF_PARSER_ASSERT(ptr);
    // Bytes of the current buffer (excluding slop) still ahead of ptr.
    int chunk_size = buffer_end_ - ptr;
    while (size > chunk_size)
    {
        // Parse up to buffer_end_; the last varint may overrun into the
        // slop region by up to kSlopBytes.
        ptr = ReadPackedVarintArray(ptr, buffer_end_, add);
        if (ptr == nullptr)
            return nullptr;
        int overrun = ptr - buffer_end_;
        GOOGLE_DCHECK(overrun >= 0 && overrun <= kSlopBytes);
        if (size - chunk_size <= kSlopBytes)
        {
            // The current buffer contains all the information needed, we don't need
            // to flip buffers. However we must parse from a buffer with enough space
            // so we are not prone to a buffer overflow.
            char buf[kSlopBytes + 10] = {};
            std::memcpy(buf, buffer_end_, kSlopBytes);
            GOOGLE_CHECK_LE(size - chunk_size, kSlopBytes);
            auto end = buf + (size - chunk_size);
            auto res = ReadPackedVarintArray(buf + overrun, end, add);
            if (res == nullptr || res != end)
                return nullptr;
            // Translate the scratch-buffer position back into the stream.
            return buffer_end_ + (res - buf);
        }
        size -= overrun + chunk_size;
        GOOGLE_DCHECK_GT(size, 0);
        // We must flip buffers
        if (limit_ <= kSlopBytes)
            return nullptr;
        ptr = Next();
        if (ptr == nullptr)
            return nullptr;
        // Skip bytes already consumed from the previous buffer's slop.
        ptr += overrun;
        chunk_size = buffer_end_ - ptr;
    }
    // The remainder lies entirely within the current buffer.
    auto end = ptr + size;
    ptr = ReadPackedVarintArray(ptr, end, add);
    return end == ptr ? ptr : nullptr;
}
  945. // Helper for verification of utf8
  946. PROTOBUF_EXPORT
  947. bool VerifyUTF8(StringPiece s, const char* field_name);
  948. inline bool VerifyUTF8(const std::string* s, const char* field_name)
  949. {
  950. return VerifyUTF8(*s, field_name);
  951. }
  952. // All the string parsers with or without UTF checking and for all CTypes.
  953. PROTOBUF_NODISCARD PROTOBUF_EXPORT const char* InlineGreedyStringParser(
  954. std::string* s, const char* ptr, ParseContext* ctx
  955. );
  956. template<typename T>
  957. PROTOBUF_NODISCARD const char* FieldParser(uint64_t tag, T& field_parser, const char* ptr, ParseContext* ctx)
  958. {
  959. uint32_t number = tag >> 3;
  960. GOOGLE_PROTOBUF_PARSER_ASSERT(number != 0);
  961. using WireType = internal::WireFormatLite::WireType;
  962. switch (tag & 7)
  963. {
  964. case WireType::WIRETYPE_VARINT:
  965. {
  966. uint64_t value;
  967. ptr = VarintParse(ptr, &value);
  968. GOOGLE_PROTOBUF_PARSER_ASSERT(ptr);
  969. field_parser.AddVarint(number, value);
  970. break;
  971. }
  972. case WireType::WIRETYPE_FIXED64:
  973. {
  974. uint64_t value = UnalignedLoad<uint64_t>(ptr);
  975. ptr += 8;
  976. field_parser.AddFixed64(number, value);
  977. break;
  978. }
  979. case WireType::WIRETYPE_LENGTH_DELIMITED:
  980. {
  981. ptr = field_parser.ParseLengthDelimited(number, ptr, ctx);
  982. GOOGLE_PROTOBUF_PARSER_ASSERT(ptr);
  983. break;
  984. }
  985. case WireType::WIRETYPE_START_GROUP:
  986. {
  987. ptr = field_parser.ParseGroup(number, ptr, ctx);
  988. GOOGLE_PROTOBUF_PARSER_ASSERT(ptr);
  989. break;
  990. }
  991. case WireType::WIRETYPE_END_GROUP:
  992. {
  993. GOOGLE_LOG(FATAL) << "Can't happen";
  994. break;
  995. }
  996. case WireType::WIRETYPE_FIXED32:
  997. {
  998. uint32_t value = UnalignedLoad<uint32_t>(ptr);
  999. ptr += 4;
  1000. field_parser.AddFixed32(number, value);
  1001. break;
  1002. }
  1003. default:
  1004. return nullptr;
  1005. }
  1006. return ptr;
  1007. }
  1008. template<typename T>
  1009. PROTOBUF_NODISCARD const char* WireFormatParser(T& field_parser, const char* ptr, ParseContext* ctx)
  1010. {
  1011. while (!ctx->Done(&ptr))
  1012. {
  1013. uint32_t tag;
  1014. ptr = ReadTag(ptr, &tag);
  1015. GOOGLE_PROTOBUF_PARSER_ASSERT(ptr != nullptr);
  1016. if (tag == 0 || (tag & 7) == 4)
  1017. {
  1018. ctx->SetLastTag(tag);
  1019. return ptr;
  1020. }
  1021. ptr = FieldParser(tag, field_parser, ptr, ctx);
  1022. GOOGLE_PROTOBUF_PARSER_ASSERT(ptr != nullptr);
  1023. }
  1024. return ptr;
  1025. }
  1026. // The packed parsers parse repeated numeric primitives directly into the
  1027. // corresponding field
  1028. // These are packed varints
  1029. PROTOBUF_NODISCARD PROTOBUF_EXPORT const char* PackedInt32Parser(
  1030. void* object, const char* ptr, ParseContext* ctx
  1031. );
  1032. PROTOBUF_NODISCARD PROTOBUF_EXPORT const char* PackedUInt32Parser(
  1033. void* object, const char* ptr, ParseContext* ctx
  1034. );
  1035. PROTOBUF_NODISCARD PROTOBUF_EXPORT const char* PackedInt64Parser(
  1036. void* object, const char* ptr, ParseContext* ctx
  1037. );
  1038. PROTOBUF_NODISCARD PROTOBUF_EXPORT const char* PackedUInt64Parser(
  1039. void* object, const char* ptr, ParseContext* ctx
  1040. );
  1041. PROTOBUF_NODISCARD PROTOBUF_EXPORT const char* PackedSInt32Parser(
  1042. void* object, const char* ptr, ParseContext* ctx
  1043. );
  1044. PROTOBUF_NODISCARD PROTOBUF_EXPORT const char* PackedSInt64Parser(
  1045. void* object, const char* ptr, ParseContext* ctx
  1046. );
  1047. PROTOBUF_NODISCARD PROTOBUF_EXPORT const char* PackedEnumParser(
  1048. void* object, const char* ptr, ParseContext* ctx
  1049. );
  1050. template<typename T>
  1051. PROTOBUF_NODISCARD const char* PackedEnumParser(void* object, const char* ptr, ParseContext* ctx, bool (*is_valid)(int), InternalMetadata* metadata, int field_num)
  1052. {
  1053. return ctx->ReadPackedVarint(
  1054. ptr, [object, is_valid, metadata, field_num](uint64_t val)
  1055. {
  1056. if (is_valid(val)) {
  1057. static_cast<RepeatedField<int>*>(object)->Add(val);
  1058. } else {
  1059. WriteVarint(field_num, val, metadata->mutable_unknown_fields<T>());
  1060. } }
  1061. );
  1062. }
  1063. template<typename T>
  1064. PROTOBUF_NODISCARD const char* PackedEnumParserArg(
  1065. void* object, const char* ptr, ParseContext* ctx, bool (*is_valid)(const void*, int), const void* data, InternalMetadata* metadata, int field_num
  1066. )
  1067. {
  1068. return ctx->ReadPackedVarint(
  1069. ptr, [object, is_valid, data, metadata, field_num](uint64_t val)
  1070. {
  1071. if (is_valid(data, val)) {
  1072. static_cast<RepeatedField<int>*>(object)->Add(val);
  1073. } else {
  1074. WriteVarint(field_num, val, metadata->mutable_unknown_fields<T>());
  1075. } }
  1076. );
  1077. }
  1078. PROTOBUF_NODISCARD PROTOBUF_EXPORT const char* PackedBoolParser(
  1079. void* object, const char* ptr, ParseContext* ctx
  1080. );
  1081. PROTOBUF_NODISCARD PROTOBUF_EXPORT const char* PackedFixed32Parser(
  1082. void* object, const char* ptr, ParseContext* ctx
  1083. );
  1084. PROTOBUF_NODISCARD PROTOBUF_EXPORT const char* PackedSFixed32Parser(
  1085. void* object, const char* ptr, ParseContext* ctx
  1086. );
  1087. PROTOBUF_NODISCARD PROTOBUF_EXPORT const char* PackedFixed64Parser(
  1088. void* object, const char* ptr, ParseContext* ctx
  1089. );
  1090. PROTOBUF_NODISCARD PROTOBUF_EXPORT const char* PackedSFixed64Parser(
  1091. void* object, const char* ptr, ParseContext* ctx
  1092. );
  1093. PROTOBUF_NODISCARD PROTOBUF_EXPORT const char* PackedFloatParser(
  1094. void* object, const char* ptr, ParseContext* ctx
  1095. );
  1096. PROTOBUF_NODISCARD PROTOBUF_EXPORT const char* PackedDoubleParser(
  1097. void* object, const char* ptr, ParseContext* ctx
  1098. );
  1099. // This is the only recursive parser.
  1100. PROTOBUF_NODISCARD PROTOBUF_EXPORT const char* UnknownGroupLiteParse(
  1101. std::string* unknown, const char* ptr, ParseContext* ctx
  1102. );
// This is a helper for UnknownGroupLiteParse but is actually also
// useful in the generated code. It uses overloading on std::string* vs
// UnknownFieldSet* to make the generated code isomorphic between full and lite.
  1106. PROTOBUF_NODISCARD PROTOBUF_EXPORT const char* UnknownFieldParse(
  1107. uint32_t tag, std::string* unknown, const char* ptr, ParseContext* ctx
  1108. );
  1109. } // namespace internal
  1110. } // namespace protobuf
  1111. } // namespace google
  1112. #include <google/protobuf/port_undef.inc>
  1113. #endif // GOOGLE_PROTOBUF_PARSE_CONTEXT_H__