001    // Generated by the protocol buffer compiler.  DO NOT EDIT!
002    // source: QJournalProtocol.proto
003    
004    package org.apache.hadoop.hdfs.qjournal.protocol;
005    
006    public final class QJournalProtocolProtos {
007      private QJournalProtocolProtos() {}
008      public static void registerAllExtensions(
009          com.google.protobuf.ExtensionRegistry registry) {
010      }
011      public interface JournalIdProtoOrBuilder
012          extends com.google.protobuf.MessageOrBuilder {
013    
014        // required string identifier = 1;
015        /**
016         * <code>required string identifier = 1;</code>
017         */
018        boolean hasIdentifier();
019        /**
020         * <code>required string identifier = 1;</code>
021         */
022        java.lang.String getIdentifier();
023        /**
024         * <code>required string identifier = 1;</code>
025         */
026        com.google.protobuf.ByteString
027            getIdentifierBytes();
028      }
029      /**
030       * Protobuf type {@code hadoop.hdfs.JournalIdProto}
031       */
032      public static final class JournalIdProto extends
033          com.google.protobuf.GeneratedMessage
034          implements JournalIdProtoOrBuilder {
035        // Use JournalIdProto.newBuilder() to construct.
036        private JournalIdProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
037          super(builder);
038          this.unknownFields = builder.getUnknownFields();
039        }
040        private JournalIdProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
041    
042        private static final JournalIdProto defaultInstance;
043        public static JournalIdProto getDefaultInstance() {
044          return defaultInstance;
045        }
046    
047        public JournalIdProto getDefaultInstanceForType() {
048          return defaultInstance;
049        }
050    
051        private final com.google.protobuf.UnknownFieldSet unknownFields;
052        @java.lang.Override
053        public final com.google.protobuf.UnknownFieldSet
054            getUnknownFields() {
055          return this.unknownFields;
056        }
057        private JournalIdProto(
058            com.google.protobuf.CodedInputStream input,
059            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
060            throws com.google.protobuf.InvalidProtocolBufferException {
061          initFields();
062          int mutable_bitField0_ = 0;
063          com.google.protobuf.UnknownFieldSet.Builder unknownFields =
064              com.google.protobuf.UnknownFieldSet.newBuilder();
065          try {
066            boolean done = false;
067            while (!done) {
068              int tag = input.readTag();
069              switch (tag) {
070                case 0:
071                  done = true;
072                  break;
073                default: {
074                  if (!parseUnknownField(input, unknownFields,
075                                         extensionRegistry, tag)) {
076                    done = true;
077                  }
078                  break;
079                }
080                case 10: {
081                  bitField0_ |= 0x00000001;
082                  identifier_ = input.readBytes();
083                  break;
084                }
085              }
086            }
087          } catch (com.google.protobuf.InvalidProtocolBufferException e) {
088            throw e.setUnfinishedMessage(this);
089          } catch (java.io.IOException e) {
090            throw new com.google.protobuf.InvalidProtocolBufferException(
091                e.getMessage()).setUnfinishedMessage(this);
092          } finally {
093            this.unknownFields = unknownFields.build();
094            makeExtensionsImmutable();
095          }
096        }
097        public static final com.google.protobuf.Descriptors.Descriptor
098            getDescriptor() {
099          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_JournalIdProto_descriptor;
100        }
101    
102        protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
103            internalGetFieldAccessorTable() {
104          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_JournalIdProto_fieldAccessorTable
105              .ensureFieldAccessorsInitialized(
106                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder.class);
107        }
108    
109        public static com.google.protobuf.Parser<JournalIdProto> PARSER =
110            new com.google.protobuf.AbstractParser<JournalIdProto>() {
111          public JournalIdProto parsePartialFrom(
112              com.google.protobuf.CodedInputStream input,
113              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
114              throws com.google.protobuf.InvalidProtocolBufferException {
115            return new JournalIdProto(input, extensionRegistry);
116          }
117        };
118    
119        @java.lang.Override
120        public com.google.protobuf.Parser<JournalIdProto> getParserForType() {
121          return PARSER;
122        }
123    
124        private int bitField0_;
125        // required string identifier = 1;
126        public static final int IDENTIFIER_FIELD_NUMBER = 1;
127        private java.lang.Object identifier_;
128        /**
129         * <code>required string identifier = 1;</code>
130         */
131        public boolean hasIdentifier() {
132          return ((bitField0_ & 0x00000001) == 0x00000001);
133        }
134        /**
135         * <code>required string identifier = 1;</code>
136         */
137        public java.lang.String getIdentifier() {
138          java.lang.Object ref = identifier_;
139          if (ref instanceof java.lang.String) {
140            return (java.lang.String) ref;
141          } else {
142            com.google.protobuf.ByteString bs = 
143                (com.google.protobuf.ByteString) ref;
144            java.lang.String s = bs.toStringUtf8();
145            if (bs.isValidUtf8()) {
146              identifier_ = s;
147            }
148            return s;
149          }
150        }
151        /**
152         * <code>required string identifier = 1;</code>
153         */
154        public com.google.protobuf.ByteString
155            getIdentifierBytes() {
156          java.lang.Object ref = identifier_;
157          if (ref instanceof java.lang.String) {
158            com.google.protobuf.ByteString b = 
159                com.google.protobuf.ByteString.copyFromUtf8(
160                    (java.lang.String) ref);
161            identifier_ = b;
162            return b;
163          } else {
164            return (com.google.protobuf.ByteString) ref;
165          }
166        }
167    
168        private void initFields() {
169          identifier_ = "";
170        }
171        private byte memoizedIsInitialized = -1;
172        public final boolean isInitialized() {
173          byte isInitialized = memoizedIsInitialized;
174          if (isInitialized != -1) return isInitialized == 1;
175    
176          if (!hasIdentifier()) {
177            memoizedIsInitialized = 0;
178            return false;
179          }
180          memoizedIsInitialized = 1;
181          return true;
182        }
183    
184        public void writeTo(com.google.protobuf.CodedOutputStream output)
185                            throws java.io.IOException {
186          getSerializedSize();
187          if (((bitField0_ & 0x00000001) == 0x00000001)) {
188            output.writeBytes(1, getIdentifierBytes());
189          }
190          getUnknownFields().writeTo(output);
191        }
192    
193        private int memoizedSerializedSize = -1;
194        public int getSerializedSize() {
195          int size = memoizedSerializedSize;
196          if (size != -1) return size;
197    
198          size = 0;
199          if (((bitField0_ & 0x00000001) == 0x00000001)) {
200            size += com.google.protobuf.CodedOutputStream
201              .computeBytesSize(1, getIdentifierBytes());
202          }
203          size += getUnknownFields().getSerializedSize();
204          memoizedSerializedSize = size;
205          return size;
206        }
207    
208        private static final long serialVersionUID = 0L;
209        @java.lang.Override
210        protected java.lang.Object writeReplace()
211            throws java.io.ObjectStreamException {
212          return super.writeReplace();
213        }
214    
215        @java.lang.Override
216        public boolean equals(final java.lang.Object obj) {
217          if (obj == this) {
218           return true;
219          }
220          if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto)) {
221            return super.equals(obj);
222          }
223          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto) obj;
224    
225          boolean result = true;
226          result = result && (hasIdentifier() == other.hasIdentifier());
227          if (hasIdentifier()) {
228            result = result && getIdentifier()
229                .equals(other.getIdentifier());
230          }
231          result = result &&
232              getUnknownFields().equals(other.getUnknownFields());
233          return result;
234        }
235    
236        private int memoizedHashCode = 0;
237        @java.lang.Override
238        public int hashCode() {
239          if (memoizedHashCode != 0) {
240            return memoizedHashCode;
241          }
242          int hash = 41;
243          hash = (19 * hash) + getDescriptorForType().hashCode();
244          if (hasIdentifier()) {
245            hash = (37 * hash) + IDENTIFIER_FIELD_NUMBER;
246            hash = (53 * hash) + getIdentifier().hashCode();
247          }
248          hash = (29 * hash) + getUnknownFields().hashCode();
249          memoizedHashCode = hash;
250          return hash;
251        }
252    
253        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto parseFrom(
254            com.google.protobuf.ByteString data)
255            throws com.google.protobuf.InvalidProtocolBufferException {
256          return PARSER.parseFrom(data);
257        }
258        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto parseFrom(
259            com.google.protobuf.ByteString data,
260            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
261            throws com.google.protobuf.InvalidProtocolBufferException {
262          return PARSER.parseFrom(data, extensionRegistry);
263        }
264        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto parseFrom(byte[] data)
265            throws com.google.protobuf.InvalidProtocolBufferException {
266          return PARSER.parseFrom(data);
267        }
268        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto parseFrom(
269            byte[] data,
270            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
271            throws com.google.protobuf.InvalidProtocolBufferException {
272          return PARSER.parseFrom(data, extensionRegistry);
273        }
274        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto parseFrom(java.io.InputStream input)
275            throws java.io.IOException {
276          return PARSER.parseFrom(input);
277        }
278        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto parseFrom(
279            java.io.InputStream input,
280            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
281            throws java.io.IOException {
282          return PARSER.parseFrom(input, extensionRegistry);
283        }
284        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto parseDelimitedFrom(java.io.InputStream input)
285            throws java.io.IOException {
286          return PARSER.parseDelimitedFrom(input);
287        }
288        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto parseDelimitedFrom(
289            java.io.InputStream input,
290            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
291            throws java.io.IOException {
292          return PARSER.parseDelimitedFrom(input, extensionRegistry);
293        }
294        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto parseFrom(
295            com.google.protobuf.CodedInputStream input)
296            throws java.io.IOException {
297          return PARSER.parseFrom(input);
298        }
299        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto parseFrom(
300            com.google.protobuf.CodedInputStream input,
301            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
302            throws java.io.IOException {
303          return PARSER.parseFrom(input, extensionRegistry);
304        }
305    
306        public static Builder newBuilder() { return Builder.create(); }
307        public Builder newBuilderForType() { return newBuilder(); }
308        public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto prototype) {
309          return newBuilder().mergeFrom(prototype);
310        }
311        public Builder toBuilder() { return newBuilder(this); }
312    
313        @java.lang.Override
314        protected Builder newBuilderForType(
315            com.google.protobuf.GeneratedMessage.BuilderParent parent) {
316          Builder builder = new Builder(parent);
317          return builder;
318        }
319        /**
320         * Protobuf type {@code hadoop.hdfs.JournalIdProto}
321         */
322        public static final class Builder extends
323            com.google.protobuf.GeneratedMessage.Builder<Builder>
324           implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder {
325          public static final com.google.protobuf.Descriptors.Descriptor
326              getDescriptor() {
327            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_JournalIdProto_descriptor;
328          }
329    
330          protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
331              internalGetFieldAccessorTable() {
332            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_JournalIdProto_fieldAccessorTable
333                .ensureFieldAccessorsInitialized(
334                    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder.class);
335          }
336    
337          // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.newBuilder()
338          private Builder() {
339            maybeForceBuilderInitialization();
340          }
341    
342          private Builder(
343              com.google.protobuf.GeneratedMessage.BuilderParent parent) {
344            super(parent);
345            maybeForceBuilderInitialization();
346          }
347          private void maybeForceBuilderInitialization() {
348            if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
349            }
350          }
351          private static Builder create() {
352            return new Builder();
353          }
354    
355          public Builder clear() {
356            super.clear();
357            identifier_ = "";
358            bitField0_ = (bitField0_ & ~0x00000001);
359            return this;
360          }
361    
362          public Builder clone() {
363            return create().mergeFrom(buildPartial());
364          }
365    
366          public com.google.protobuf.Descriptors.Descriptor
367              getDescriptorForType() {
368            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_JournalIdProto_descriptor;
369          }
370    
371          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getDefaultInstanceForType() {
372            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
373          }
374    
375          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto build() {
376            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto result = buildPartial();
377            if (!result.isInitialized()) {
378              throw newUninitializedMessageException(result);
379            }
380            return result;
381          }
382    
383          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto buildPartial() {
384            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto(this);
385            int from_bitField0_ = bitField0_;
386            int to_bitField0_ = 0;
387            if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
388              to_bitField0_ |= 0x00000001;
389            }
390            result.identifier_ = identifier_;
391            result.bitField0_ = to_bitField0_;
392            onBuilt();
393            return result;
394          }
395    
396          public Builder mergeFrom(com.google.protobuf.Message other) {
397            if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto) {
398              return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto)other);
399            } else {
400              super.mergeFrom(other);
401              return this;
402            }
403          }
404    
405          public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto other) {
406            if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance()) return this;
407            if (other.hasIdentifier()) {
408              bitField0_ |= 0x00000001;
409              identifier_ = other.identifier_;
410              onChanged();
411            }
412            this.mergeUnknownFields(other.getUnknownFields());
413            return this;
414          }
415    
416          public final boolean isInitialized() {
417            if (!hasIdentifier()) {
418              
419              return false;
420            }
421            return true;
422          }
423    
424          public Builder mergeFrom(
425              com.google.protobuf.CodedInputStream input,
426              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
427              throws java.io.IOException {
428            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto parsedMessage = null;
429            try {
430              parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
431            } catch (com.google.protobuf.InvalidProtocolBufferException e) {
432              parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto) e.getUnfinishedMessage();
433              throw e;
434            } finally {
435              if (parsedMessage != null) {
436                mergeFrom(parsedMessage);
437              }
438            }
439            return this;
440          }
441          private int bitField0_;
442    
443          // required string identifier = 1;
444          private java.lang.Object identifier_ = "";
445          /**
446           * <code>required string identifier = 1;</code>
447           */
448          public boolean hasIdentifier() {
449            return ((bitField0_ & 0x00000001) == 0x00000001);
450          }
451          /**
452           * <code>required string identifier = 1;</code>
453           */
454          public java.lang.String getIdentifier() {
455            java.lang.Object ref = identifier_;
456            if (!(ref instanceof java.lang.String)) {
457              java.lang.String s = ((com.google.protobuf.ByteString) ref)
458                  .toStringUtf8();
459              identifier_ = s;
460              return s;
461            } else {
462              return (java.lang.String) ref;
463            }
464          }
465          /**
466           * <code>required string identifier = 1;</code>
467           */
468          public com.google.protobuf.ByteString
469              getIdentifierBytes() {
470            java.lang.Object ref = identifier_;
471            if (ref instanceof String) {
472              com.google.protobuf.ByteString b = 
473                  com.google.protobuf.ByteString.copyFromUtf8(
474                      (java.lang.String) ref);
475              identifier_ = b;
476              return b;
477            } else {
478              return (com.google.protobuf.ByteString) ref;
479            }
480          }
481          /**
482           * <code>required string identifier = 1;</code>
483           */
484          public Builder setIdentifier(
485              java.lang.String value) {
486            if (value == null) {
487        throw new NullPointerException();
488      }
489      bitField0_ |= 0x00000001;
490            identifier_ = value;
491            onChanged();
492            return this;
493          }
494          /**
495           * <code>required string identifier = 1;</code>
496           */
497          public Builder clearIdentifier() {
498            bitField0_ = (bitField0_ & ~0x00000001);
499            identifier_ = getDefaultInstance().getIdentifier();
500            onChanged();
501            return this;
502          }
503          /**
504           * <code>required string identifier = 1;</code>
505           */
506          public Builder setIdentifierBytes(
507              com.google.protobuf.ByteString value) {
508            if (value == null) {
509        throw new NullPointerException();
510      }
511      bitField0_ |= 0x00000001;
512            identifier_ = value;
513            onChanged();
514            return this;
515          }
516    
517          // @@protoc_insertion_point(builder_scope:hadoop.hdfs.JournalIdProto)
518        }
519    
520        static {
521          defaultInstance = new JournalIdProto(true);
522          defaultInstance.initFields();
523        }
524    
525        // @@protoc_insertion_point(class_scope:hadoop.hdfs.JournalIdProto)
526      }
527    
528      public interface RequestInfoProtoOrBuilder
529          extends com.google.protobuf.MessageOrBuilder {
530    
531        // required .hadoop.hdfs.JournalIdProto journalId = 1;
532        /**
533         * <code>required .hadoop.hdfs.JournalIdProto journalId = 1;</code>
534         */
535        boolean hasJournalId();
536        /**
537         * <code>required .hadoop.hdfs.JournalIdProto journalId = 1;</code>
538         */
539        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJournalId();
540        /**
541         * <code>required .hadoop.hdfs.JournalIdProto journalId = 1;</code>
542         */
543        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJournalIdOrBuilder();
544    
545        // required uint64 epoch = 2;
546        /**
547         * <code>required uint64 epoch = 2;</code>
548         */
549        boolean hasEpoch();
550        /**
551         * <code>required uint64 epoch = 2;</code>
552         */
553        long getEpoch();
554    
555        // required uint64 ipcSerialNumber = 3;
556        /**
557         * <code>required uint64 ipcSerialNumber = 3;</code>
558         */
559        boolean hasIpcSerialNumber();
560        /**
561         * <code>required uint64 ipcSerialNumber = 3;</code>
562         */
563        long getIpcSerialNumber();
564    
565        // optional uint64 committedTxId = 4;
566        /**
567         * <code>optional uint64 committedTxId = 4;</code>
568         *
569         * <pre>
570         * Whenever a writer makes a request, it informs
571         * the node of the latest committed txid. This may
572         * be higher than the transaction data included in the
573         * request itself, eg in the case that the node has
574         * fallen behind.
575         * </pre>
576         */
577        boolean hasCommittedTxId();
578        /**
579         * <code>optional uint64 committedTxId = 4;</code>
580         *
581         * <pre>
582         * Whenever a writer makes a request, it informs
583         * the node of the latest committed txid. This may
584         * be higher than the transaction data included in the
585         * request itself, eg in the case that the node has
586         * fallen behind.
587         * </pre>
588         */
589        long getCommittedTxId();
590      }
591      /**
592       * Protobuf type {@code hadoop.hdfs.RequestInfoProto}
593       */
594      public static final class RequestInfoProto extends
595          com.google.protobuf.GeneratedMessage
596          implements RequestInfoProtoOrBuilder {
597        // Use RequestInfoProto.newBuilder() to construct.
598        private RequestInfoProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
599          super(builder);
600          this.unknownFields = builder.getUnknownFields();
601        }
602        private RequestInfoProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
603    
604        private static final RequestInfoProto defaultInstance;
605        public static RequestInfoProto getDefaultInstance() {
606          return defaultInstance;
607        }
608    
609        public RequestInfoProto getDefaultInstanceForType() {
610          return defaultInstance;
611        }
612    
613        private final com.google.protobuf.UnknownFieldSet unknownFields;
614        @java.lang.Override
615        public final com.google.protobuf.UnknownFieldSet
616            getUnknownFields() {
617          return this.unknownFields;
618        }
619        private RequestInfoProto(
620            com.google.protobuf.CodedInputStream input,
621            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
622            throws com.google.protobuf.InvalidProtocolBufferException {
623          initFields();
624          int mutable_bitField0_ = 0;
625          com.google.protobuf.UnknownFieldSet.Builder unknownFields =
626              com.google.protobuf.UnknownFieldSet.newBuilder();
627          try {
628            boolean done = false;
629            while (!done) {
630              int tag = input.readTag();
631              switch (tag) {
632                case 0:
633                  done = true;
634                  break;
635                default: {
636                  if (!parseUnknownField(input, unknownFields,
637                                         extensionRegistry, tag)) {
638                    done = true;
639                  }
640                  break;
641                }
642                case 10: {
643                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder subBuilder = null;
644                  if (((bitField0_ & 0x00000001) == 0x00000001)) {
645                    subBuilder = journalId_.toBuilder();
646                  }
647                  journalId_ = input.readMessage(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.PARSER, extensionRegistry);
648                  if (subBuilder != null) {
649                    subBuilder.mergeFrom(journalId_);
650                    journalId_ = subBuilder.buildPartial();
651                  }
652                  bitField0_ |= 0x00000001;
653                  break;
654                }
655                case 16: {
656                  bitField0_ |= 0x00000002;
657                  epoch_ = input.readUInt64();
658                  break;
659                }
660                case 24: {
661                  bitField0_ |= 0x00000004;
662                  ipcSerialNumber_ = input.readUInt64();
663                  break;
664                }
665                case 32: {
666                  bitField0_ |= 0x00000008;
667                  committedTxId_ = input.readUInt64();
668                  break;
669                }
670              }
671            }
672          } catch (com.google.protobuf.InvalidProtocolBufferException e) {
673            throw e.setUnfinishedMessage(this);
674          } catch (java.io.IOException e) {
675            throw new com.google.protobuf.InvalidProtocolBufferException(
676                e.getMessage()).setUnfinishedMessage(this);
677          } finally {
678            this.unknownFields = unknownFields.build();
679            makeExtensionsImmutable();
680          }
681        }
682        public static final com.google.protobuf.Descriptors.Descriptor
683            getDescriptor() {
684          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_RequestInfoProto_descriptor;
685        }
686    
687        protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
688            internalGetFieldAccessorTable() {
689          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_RequestInfoProto_fieldAccessorTable
690              .ensureFieldAccessorsInitialized(
691                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder.class);
692        }
693    
694        public static com.google.protobuf.Parser<RequestInfoProto> PARSER =
695            new com.google.protobuf.AbstractParser<RequestInfoProto>() {
696          public RequestInfoProto parsePartialFrom(
697              com.google.protobuf.CodedInputStream input,
698              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
699              throws com.google.protobuf.InvalidProtocolBufferException {
700            return new RequestInfoProto(input, extensionRegistry);
701          }
702        };
703    
704        @java.lang.Override
705        public com.google.protobuf.Parser<RequestInfoProto> getParserForType() {
706          return PARSER;
707        }
708    
709        private int bitField0_;
710        // required .hadoop.hdfs.JournalIdProto journalId = 1;
711        public static final int JOURNALID_FIELD_NUMBER = 1;
712        private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto journalId_;
713        /**
714         * <code>required .hadoop.hdfs.JournalIdProto journalId = 1;</code>
715         */
716        public boolean hasJournalId() {
717          return ((bitField0_ & 0x00000001) == 0x00000001);
718        }
719        /**
720         * <code>required .hadoop.hdfs.JournalIdProto journalId = 1;</code>
721         */
722        public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJournalId() {
723          return journalId_;
724        }
725        /**
726         * <code>required .hadoop.hdfs.JournalIdProto journalId = 1;</code>
727         */
728        public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJournalIdOrBuilder() {
729          return journalId_;
730        }
731    
732        // required uint64 epoch = 2;
733        public static final int EPOCH_FIELD_NUMBER = 2;
734        private long epoch_;
735        /**
736         * <code>required uint64 epoch = 2;</code>
737         */
738        public boolean hasEpoch() {
739          return ((bitField0_ & 0x00000002) == 0x00000002);
740        }
741        /**
742         * <code>required uint64 epoch = 2;</code>
743         */
744        public long getEpoch() {
745          return epoch_;
746        }
747    
748        // required uint64 ipcSerialNumber = 3;
749        public static final int IPCSERIALNUMBER_FIELD_NUMBER = 3;
750        private long ipcSerialNumber_;
751        /**
752         * <code>required uint64 ipcSerialNumber = 3;</code>
753         */
754        public boolean hasIpcSerialNumber() {
755          return ((bitField0_ & 0x00000004) == 0x00000004);
756        }
757        /**
758         * <code>required uint64 ipcSerialNumber = 3;</code>
759         */
760        public long getIpcSerialNumber() {
761          return ipcSerialNumber_;
762        }
763    
764        // optional uint64 committedTxId = 4;
765        public static final int COMMITTEDTXID_FIELD_NUMBER = 4;
766        private long committedTxId_;
767        /**
768         * <code>optional uint64 committedTxId = 4;</code>
769         *
770         * <pre>
771         * Whenever a writer makes a request, it informs
772         * the node of the latest committed txid. This may
773         * be higher than the transaction data included in the
774         * request itself, eg in the case that the node has
775         * fallen behind.
776         * </pre>
777         */
778        public boolean hasCommittedTxId() {
779          return ((bitField0_ & 0x00000008) == 0x00000008);
780        }
781        /**
782         * <code>optional uint64 committedTxId = 4;</code>
783         *
784         * <pre>
785         * Whenever a writer makes a request, it informs
786         * the node of the latest committed txid. This may
787         * be higher than the transaction data included in the
788         * request itself, eg in the case that the node has
789         * fallen behind.
790         * </pre>
791         */
792        public long getCommittedTxId() {
793          return committedTxId_;
794        }
795    
796        private void initFields() {
797          journalId_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
798          epoch_ = 0L;
799          ipcSerialNumber_ = 0L;
800          committedTxId_ = 0L;
801        }
802        private byte memoizedIsInitialized = -1;
803        public final boolean isInitialized() {
804          byte isInitialized = memoizedIsInitialized;
805          if (isInitialized != -1) return isInitialized == 1;
806    
807          if (!hasJournalId()) {
808            memoizedIsInitialized = 0;
809            return false;
810          }
811          if (!hasEpoch()) {
812            memoizedIsInitialized = 0;
813            return false;
814          }
815          if (!hasIpcSerialNumber()) {
816            memoizedIsInitialized = 0;
817            return false;
818          }
819          if (!getJournalId().isInitialized()) {
820            memoizedIsInitialized = 0;
821            return false;
822          }
823          memoizedIsInitialized = 1;
824          return true;
825        }
826    
827        public void writeTo(com.google.protobuf.CodedOutputStream output)
828                            throws java.io.IOException {
829          getSerializedSize();
830          if (((bitField0_ & 0x00000001) == 0x00000001)) {
831            output.writeMessage(1, journalId_);
832          }
833          if (((bitField0_ & 0x00000002) == 0x00000002)) {
834            output.writeUInt64(2, epoch_);
835          }
836          if (((bitField0_ & 0x00000004) == 0x00000004)) {
837            output.writeUInt64(3, ipcSerialNumber_);
838          }
839          if (((bitField0_ & 0x00000008) == 0x00000008)) {
840            output.writeUInt64(4, committedTxId_);
841          }
842          getUnknownFields().writeTo(output);
843        }
844    
845        private int memoizedSerializedSize = -1;
846        public int getSerializedSize() {
847          int size = memoizedSerializedSize;
848          if (size != -1) return size;
849    
850          size = 0;
851          if (((bitField0_ & 0x00000001) == 0x00000001)) {
852            size += com.google.protobuf.CodedOutputStream
853              .computeMessageSize(1, journalId_);
854          }
855          if (((bitField0_ & 0x00000002) == 0x00000002)) {
856            size += com.google.protobuf.CodedOutputStream
857              .computeUInt64Size(2, epoch_);
858          }
859          if (((bitField0_ & 0x00000004) == 0x00000004)) {
860            size += com.google.protobuf.CodedOutputStream
861              .computeUInt64Size(3, ipcSerialNumber_);
862          }
863          if (((bitField0_ & 0x00000008) == 0x00000008)) {
864            size += com.google.protobuf.CodedOutputStream
865              .computeUInt64Size(4, committedTxId_);
866          }
867          size += getUnknownFields().getSerializedSize();
868          memoizedSerializedSize = size;
869          return size;
870        }
871    
872        private static final long serialVersionUID = 0L;
873        @java.lang.Override
874        protected java.lang.Object writeReplace()
875            throws java.io.ObjectStreamException {
876          return super.writeReplace();
877        }
878    
879        @java.lang.Override
880        public boolean equals(final java.lang.Object obj) {
881          if (obj == this) {
882           return true;
883          }
884          if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto)) {
885            return super.equals(obj);
886          }
887          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto) obj;
888    
889          boolean result = true;
890          result = result && (hasJournalId() == other.hasJournalId());
891          if (hasJournalId()) {
892            result = result && getJournalId()
893                .equals(other.getJournalId());
894          }
895          result = result && (hasEpoch() == other.hasEpoch());
896          if (hasEpoch()) {
897            result = result && (getEpoch()
898                == other.getEpoch());
899          }
900          result = result && (hasIpcSerialNumber() == other.hasIpcSerialNumber());
901          if (hasIpcSerialNumber()) {
902            result = result && (getIpcSerialNumber()
903                == other.getIpcSerialNumber());
904          }
905          result = result && (hasCommittedTxId() == other.hasCommittedTxId());
906          if (hasCommittedTxId()) {
907            result = result && (getCommittedTxId()
908                == other.getCommittedTxId());
909          }
910          result = result &&
911              getUnknownFields().equals(other.getUnknownFields());
912          return result;
913        }
914    
915        private int memoizedHashCode = 0;
916        @java.lang.Override
917        public int hashCode() {
918          if (memoizedHashCode != 0) {
919            return memoizedHashCode;
920          }
921          int hash = 41;
922          hash = (19 * hash) + getDescriptorForType().hashCode();
923          if (hasJournalId()) {
924            hash = (37 * hash) + JOURNALID_FIELD_NUMBER;
925            hash = (53 * hash) + getJournalId().hashCode();
926          }
927          if (hasEpoch()) {
928            hash = (37 * hash) + EPOCH_FIELD_NUMBER;
929            hash = (53 * hash) + hashLong(getEpoch());
930          }
931          if (hasIpcSerialNumber()) {
932            hash = (37 * hash) + IPCSERIALNUMBER_FIELD_NUMBER;
933            hash = (53 * hash) + hashLong(getIpcSerialNumber());
934          }
935          if (hasCommittedTxId()) {
936            hash = (37 * hash) + COMMITTEDTXID_FIELD_NUMBER;
937            hash = (53 * hash) + hashLong(getCommittedTxId());
938          }
939          hash = (29 * hash) + getUnknownFields().hashCode();
940          memoizedHashCode = hash;
941          return hash;
942        }
943    
944        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto parseFrom(
945            com.google.protobuf.ByteString data)
946            throws com.google.protobuf.InvalidProtocolBufferException {
947          return PARSER.parseFrom(data);
948        }
949        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto parseFrom(
950            com.google.protobuf.ByteString data,
951            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
952            throws com.google.protobuf.InvalidProtocolBufferException {
953          return PARSER.parseFrom(data, extensionRegistry);
954        }
955        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto parseFrom(byte[] data)
956            throws com.google.protobuf.InvalidProtocolBufferException {
957          return PARSER.parseFrom(data);
958        }
959        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto parseFrom(
960            byte[] data,
961            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
962            throws com.google.protobuf.InvalidProtocolBufferException {
963          return PARSER.parseFrom(data, extensionRegistry);
964        }
965        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto parseFrom(java.io.InputStream input)
966            throws java.io.IOException {
967          return PARSER.parseFrom(input);
968        }
969        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto parseFrom(
970            java.io.InputStream input,
971            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
972            throws java.io.IOException {
973          return PARSER.parseFrom(input, extensionRegistry);
974        }
975        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto parseDelimitedFrom(java.io.InputStream input)
976            throws java.io.IOException {
977          return PARSER.parseDelimitedFrom(input);
978        }
979        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto parseDelimitedFrom(
980            java.io.InputStream input,
981            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
982            throws java.io.IOException {
983          return PARSER.parseDelimitedFrom(input, extensionRegistry);
984        }
985        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto parseFrom(
986            com.google.protobuf.CodedInputStream input)
987            throws java.io.IOException {
988          return PARSER.parseFrom(input);
989        }
990        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto parseFrom(
991            com.google.protobuf.CodedInputStream input,
992            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
993            throws java.io.IOException {
994          return PARSER.parseFrom(input, extensionRegistry);
995        }
996    
997        public static Builder newBuilder() { return Builder.create(); }
998        public Builder newBuilderForType() { return newBuilder(); }
999        public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto prototype) {
1000          return newBuilder().mergeFrom(prototype);
1001        }
1002        public Builder toBuilder() { return newBuilder(this); }
1003    
1004        @java.lang.Override
1005        protected Builder newBuilderForType(
1006            com.google.protobuf.GeneratedMessage.BuilderParent parent) {
1007          Builder builder = new Builder(parent);
1008          return builder;
1009        }
1010        /**
1011         * Protobuf type {@code hadoop.hdfs.RequestInfoProto}
1012         */
1013        public static final class Builder extends
1014            com.google.protobuf.GeneratedMessage.Builder<Builder>
1015           implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder {
1016          public static final com.google.protobuf.Descriptors.Descriptor
1017              getDescriptor() {
1018            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_RequestInfoProto_descriptor;
1019          }
1020    
1021          protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
1022              internalGetFieldAccessorTable() {
1023            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_RequestInfoProto_fieldAccessorTable
1024                .ensureFieldAccessorsInitialized(
1025                    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder.class);
1026          }
1027    
1028          // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.newBuilder()
1029          private Builder() {
1030            maybeForceBuilderInitialization();
1031          }
1032    
1033          private Builder(
1034              com.google.protobuf.GeneratedMessage.BuilderParent parent) {
1035            super(parent);
1036            maybeForceBuilderInitialization();
1037          }
1038          private void maybeForceBuilderInitialization() {
1039            if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
1040              getJournalIdFieldBuilder();
1041            }
1042          }
1043          private static Builder create() {
1044            return new Builder();
1045          }
1046    
1047          public Builder clear() {
1048            super.clear();
1049            if (journalIdBuilder_ == null) {
1050              journalId_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
1051            } else {
1052              journalIdBuilder_.clear();
1053            }
1054            bitField0_ = (bitField0_ & ~0x00000001);
1055            epoch_ = 0L;
1056            bitField0_ = (bitField0_ & ~0x00000002);
1057            ipcSerialNumber_ = 0L;
1058            bitField0_ = (bitField0_ & ~0x00000004);
1059            committedTxId_ = 0L;
1060            bitField0_ = (bitField0_ & ~0x00000008);
1061            return this;
1062          }
1063    
1064          public Builder clone() {
1065            return create().mergeFrom(buildPartial());
1066          }
1067    
1068          public com.google.protobuf.Descriptors.Descriptor
1069              getDescriptorForType() {
1070            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_RequestInfoProto_descriptor;
1071          }
1072    
1073          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getDefaultInstanceForType() {
1074            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
1075          }
1076    
1077          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto build() {
1078            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto result = buildPartial();
1079            if (!result.isInitialized()) {
1080              throw newUninitializedMessageException(result);
1081            }
1082            return result;
1083          }
1084    
1085          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto buildPartial() {
1086            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto(this);
1087            int from_bitField0_ = bitField0_;
1088            int to_bitField0_ = 0;
1089            if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
1090              to_bitField0_ |= 0x00000001;
1091            }
1092            if (journalIdBuilder_ == null) {
1093              result.journalId_ = journalId_;
1094            } else {
1095              result.journalId_ = journalIdBuilder_.build();
1096            }
1097            if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
1098              to_bitField0_ |= 0x00000002;
1099            }
1100            result.epoch_ = epoch_;
1101            if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
1102              to_bitField0_ |= 0x00000004;
1103            }
1104            result.ipcSerialNumber_ = ipcSerialNumber_;
1105            if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
1106              to_bitField0_ |= 0x00000008;
1107            }
1108            result.committedTxId_ = committedTxId_;
1109            result.bitField0_ = to_bitField0_;
1110            onBuilt();
1111            return result;
1112          }
1113    
1114          public Builder mergeFrom(com.google.protobuf.Message other) {
1115            if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto) {
1116              return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto)other);
1117            } else {
1118              super.mergeFrom(other);
1119              return this;
1120            }
1121          }
1122    
1123          public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto other) {
1124            if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance()) return this;
1125            if (other.hasJournalId()) {
1126              mergeJournalId(other.getJournalId());
1127            }
1128            if (other.hasEpoch()) {
1129              setEpoch(other.getEpoch());
1130            }
1131            if (other.hasIpcSerialNumber()) {
1132              setIpcSerialNumber(other.getIpcSerialNumber());
1133            }
1134            if (other.hasCommittedTxId()) {
1135              setCommittedTxId(other.getCommittedTxId());
1136            }
1137            this.mergeUnknownFields(other.getUnknownFields());
1138            return this;
1139          }
1140    
1141          public final boolean isInitialized() {
1142            if (!hasJournalId()) {
1143              
1144              return false;
1145            }
1146            if (!hasEpoch()) {
1147              
1148              return false;
1149            }
1150            if (!hasIpcSerialNumber()) {
1151              
1152              return false;
1153            }
1154            if (!getJournalId().isInitialized()) {
1155              
1156              return false;
1157            }
1158            return true;
1159          }
1160    
1161          public Builder mergeFrom(
1162              com.google.protobuf.CodedInputStream input,
1163              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
1164              throws java.io.IOException {
1165            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto parsedMessage = null;
1166            try {
1167              parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
1168            } catch (com.google.protobuf.InvalidProtocolBufferException e) {
1169              parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto) e.getUnfinishedMessage();
1170              throw e;
1171            } finally {
1172              if (parsedMessage != null) {
1173                mergeFrom(parsedMessage);
1174              }
1175            }
1176            return this;
1177          }
1178          private int bitField0_;
1179    
1180          // required .hadoop.hdfs.JournalIdProto journalId = 1;
1181          private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto journalId_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
1182          private com.google.protobuf.SingleFieldBuilder<
1183              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder> journalIdBuilder_;
1184          /**
1185           * <code>required .hadoop.hdfs.JournalIdProto journalId = 1;</code>
1186           */
1187          public boolean hasJournalId() {
1188            return ((bitField0_ & 0x00000001) == 0x00000001);
1189          }
1190          /**
1191           * <code>required .hadoop.hdfs.JournalIdProto journalId = 1;</code>
1192           */
1193          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJournalId() {
1194            if (journalIdBuilder_ == null) {
1195              return journalId_;
1196            } else {
1197              return journalIdBuilder_.getMessage();
1198            }
1199          }
1200          /**
1201           * <code>required .hadoop.hdfs.JournalIdProto journalId = 1;</code>
1202           */
1203          public Builder setJournalId(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto value) {
1204            if (journalIdBuilder_ == null) {
1205              if (value == null) {
1206                throw new NullPointerException();
1207              }
1208              journalId_ = value;
1209              onChanged();
1210            } else {
1211              journalIdBuilder_.setMessage(value);
1212            }
1213            bitField0_ |= 0x00000001;
1214            return this;
1215          }
1216          /**
1217           * <code>required .hadoop.hdfs.JournalIdProto journalId = 1;</code>
1218           */
1219          public Builder setJournalId(
1220              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder builderForValue) {
1221            if (journalIdBuilder_ == null) {
1222              journalId_ = builderForValue.build();
1223              onChanged();
1224            } else {
1225              journalIdBuilder_.setMessage(builderForValue.build());
1226            }
1227            bitField0_ |= 0x00000001;
1228            return this;
1229          }
1230          /**
1231           * <code>required .hadoop.hdfs.JournalIdProto journalId = 1;</code>
1232           */
1233          public Builder mergeJournalId(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto value) {
1234            if (journalIdBuilder_ == null) {
1235              if (((bitField0_ & 0x00000001) == 0x00000001) &&
1236                  journalId_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance()) {
1237                journalId_ =
1238                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.newBuilder(journalId_).mergeFrom(value).buildPartial();
1239              } else {
1240                journalId_ = value;
1241              }
1242              onChanged();
1243            } else {
1244              journalIdBuilder_.mergeFrom(value);
1245            }
1246            bitField0_ |= 0x00000001;
1247            return this;
1248          }
1249          /**
1250           * <code>required .hadoop.hdfs.JournalIdProto journalId = 1;</code>
1251           */
1252          public Builder clearJournalId() {
1253            if (journalIdBuilder_ == null) {
1254              journalId_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
1255              onChanged();
1256            } else {
1257              journalIdBuilder_.clear();
1258            }
1259            bitField0_ = (bitField0_ & ~0x00000001);
1260            return this;
1261          }
1262          /**
1263           * <code>required .hadoop.hdfs.JournalIdProto journalId = 1;</code>
1264           */
1265          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder getJournalIdBuilder() {
1266            bitField0_ |= 0x00000001;
1267            onChanged();
1268            return getJournalIdFieldBuilder().getBuilder();
1269          }
1270          /**
1271           * <code>required .hadoop.hdfs.JournalIdProto journalId = 1;</code>
1272           */
1273          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJournalIdOrBuilder() {
1274            if (journalIdBuilder_ != null) {
1275              return journalIdBuilder_.getMessageOrBuilder();
1276            } else {
1277              return journalId_;
1278            }
1279          }
1280          /**
1281           * <code>required .hadoop.hdfs.JournalIdProto journalId = 1;</code>
1282           */
1283          private com.google.protobuf.SingleFieldBuilder<
1284              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder> 
1285              getJournalIdFieldBuilder() {
1286            if (journalIdBuilder_ == null) {
1287              journalIdBuilder_ = new com.google.protobuf.SingleFieldBuilder<
1288                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder>(
1289                      journalId_,
1290                      getParentForChildren(),
1291                      isClean());
1292              journalId_ = null;
1293            }
1294            return journalIdBuilder_;
1295          }
1296    
1297          // required uint64 epoch = 2;
1298          private long epoch_ ;
1299          /**
1300           * <code>required uint64 epoch = 2;</code>
1301           */
1302          public boolean hasEpoch() {
1303            return ((bitField0_ & 0x00000002) == 0x00000002);
1304          }
1305          /**
1306           * <code>required uint64 epoch = 2;</code>
1307           */
1308          public long getEpoch() {
1309            return epoch_;
1310          }
1311          /**
1312           * <code>required uint64 epoch = 2;</code>
1313           */
1314          public Builder setEpoch(long value) {
1315            bitField0_ |= 0x00000002;
1316            epoch_ = value;
1317            onChanged();
1318            return this;
1319          }
1320          /**
1321           * <code>required uint64 epoch = 2;</code>
1322           */
1323          public Builder clearEpoch() {
1324            bitField0_ = (bitField0_ & ~0x00000002);
1325            epoch_ = 0L;
1326            onChanged();
1327            return this;
1328          }
1329    
1330          // required uint64 ipcSerialNumber = 3;
1331          private long ipcSerialNumber_ ;
1332          /**
1333           * <code>required uint64 ipcSerialNumber = 3;</code>
1334           */
1335          public boolean hasIpcSerialNumber() {
1336            return ((bitField0_ & 0x00000004) == 0x00000004);
1337          }
1338          /**
1339           * <code>required uint64 ipcSerialNumber = 3;</code>
1340           */
1341          public long getIpcSerialNumber() {
1342            return ipcSerialNumber_;
1343          }
1344          /**
1345           * <code>required uint64 ipcSerialNumber = 3;</code>
1346           */
1347          public Builder setIpcSerialNumber(long value) {
1348            bitField0_ |= 0x00000004;
1349            ipcSerialNumber_ = value;
1350            onChanged();
1351            return this;
1352          }
1353          /**
1354           * <code>required uint64 ipcSerialNumber = 3;</code>
1355           */
1356          public Builder clearIpcSerialNumber() {
1357            bitField0_ = (bitField0_ & ~0x00000004);
1358            ipcSerialNumber_ = 0L;
1359            onChanged();
1360            return this;
1361          }
1362    
1363          // optional uint64 committedTxId = 4;
1364          private long committedTxId_ ;
1365          /**
1366           * <code>optional uint64 committedTxId = 4;</code>
1367           *
1368           * <pre>
1369           * Whenever a writer makes a request, it informs
1370           * the node of the latest committed txid. This may
1371           * be higher than the transaction data included in the
1372           * request itself, eg in the case that the node has
1373           * fallen behind.
1374           * </pre>
1375           */
1376          public boolean hasCommittedTxId() {
1377            return ((bitField0_ & 0x00000008) == 0x00000008);
1378          }
1379          /**
1380           * <code>optional uint64 committedTxId = 4;</code>
1381           *
1382           * <pre>
1383           * Whenever a writer makes a request, it informs
1384           * the node of the latest committed txid. This may
1385           * be higher than the transaction data included in the
1386           * request itself, eg in the case that the node has
1387           * fallen behind.
1388           * </pre>
1389           */
1390          public long getCommittedTxId() {
1391            return committedTxId_;
1392          }
1393          /**
1394           * <code>optional uint64 committedTxId = 4;</code>
1395           *
1396           * <pre>
1397           * Whenever a writer makes a request, it informs
1398           * the node of the latest committed txid. This may
1399           * be higher than the transaction data included in the
1400           * request itself, eg in the case that the node has
1401           * fallen behind.
1402           * </pre>
1403           */
1404          public Builder setCommittedTxId(long value) {
1405            bitField0_ |= 0x00000008;
1406            committedTxId_ = value;
1407            onChanged();
1408            return this;
1409          }
1410          /**
1411           * <code>optional uint64 committedTxId = 4;</code>
1412           *
1413           * <pre>
1414           * Whenever a writer makes a request, it informs
1415           * the node of the latest committed txid. This may
1416           * be higher than the transaction data included in the
1417           * request itself, eg in the case that the node has
1418           * fallen behind.
1419           * </pre>
1420           */
1421          public Builder clearCommittedTxId() {
1422            bitField0_ = (bitField0_ & ~0x00000008);
1423            committedTxId_ = 0L;
1424            onChanged();
1425            return this;
1426          }
1427    
1428          // @@protoc_insertion_point(builder_scope:hadoop.hdfs.RequestInfoProto)
1429        }
1430    
1431        static {
1432          defaultInstance = new RequestInfoProto(true);
1433          defaultInstance.initFields();
1434        }
1435    
1436        // @@protoc_insertion_point(class_scope:hadoop.hdfs.RequestInfoProto)
1437      }
1438    
1439      public interface SegmentStateProtoOrBuilder
1440          extends com.google.protobuf.MessageOrBuilder {
1441    
1442        // required uint64 startTxId = 1;
1443        /**
1444         * <code>required uint64 startTxId = 1;</code>
1445         */
1446        boolean hasStartTxId();
1447        /**
1448         * <code>required uint64 startTxId = 1;</code>
1449         */
1450        long getStartTxId();
1451    
1452        // required uint64 endTxId = 2;
1453        /**
1454         * <code>required uint64 endTxId = 2;</code>
1455         */
1456        boolean hasEndTxId();
1457        /**
1458         * <code>required uint64 endTxId = 2;</code>
1459         */
1460        long getEndTxId();
1461    
1462        // required bool isInProgress = 3;
1463        /**
1464         * <code>required bool isInProgress = 3;</code>
1465         */
1466        boolean hasIsInProgress();
1467        /**
1468         * <code>required bool isInProgress = 3;</code>
1469         */
1470        boolean getIsInProgress();
1471      }
1472      /**
1473       * Protobuf type {@code hadoop.hdfs.SegmentStateProto}
1474       */
1475      public static final class SegmentStateProto extends
1476          com.google.protobuf.GeneratedMessage
1477          implements SegmentStateProtoOrBuilder {
1478        // Use SegmentStateProto.newBuilder() to construct.
1479        private SegmentStateProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
1480          super(builder);
1481          this.unknownFields = builder.getUnknownFields();
1482        }
1483        private SegmentStateProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
1484    
1485        private static final SegmentStateProto defaultInstance;
1486        public static SegmentStateProto getDefaultInstance() {
1487          return defaultInstance;
1488        }
1489    
1490        public SegmentStateProto getDefaultInstanceForType() {
1491          return defaultInstance;
1492        }
1493    
1494        private final com.google.protobuf.UnknownFieldSet unknownFields;
1495        @java.lang.Override
1496        public final com.google.protobuf.UnknownFieldSet
1497            getUnknownFields() {
1498          return this.unknownFields;
1499        }
1500        private SegmentStateProto(
1501            com.google.protobuf.CodedInputStream input,
1502            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
1503            throws com.google.protobuf.InvalidProtocolBufferException {
1504          initFields();
1505          int mutable_bitField0_ = 0;
1506          com.google.protobuf.UnknownFieldSet.Builder unknownFields =
1507              com.google.protobuf.UnknownFieldSet.newBuilder();
1508          try {
1509            boolean done = false;
1510            while (!done) {
1511              int tag = input.readTag();
1512              switch (tag) {
1513                case 0:
1514                  done = true;
1515                  break;
1516                default: {
1517                  if (!parseUnknownField(input, unknownFields,
1518                                         extensionRegistry, tag)) {
1519                    done = true;
1520                  }
1521                  break;
1522                }
1523                case 8: {
1524                  bitField0_ |= 0x00000001;
1525                  startTxId_ = input.readUInt64();
1526                  break;
1527                }
1528                case 16: {
1529                  bitField0_ |= 0x00000002;
1530                  endTxId_ = input.readUInt64();
1531                  break;
1532                }
1533                case 24: {
1534                  bitField0_ |= 0x00000004;
1535                  isInProgress_ = input.readBool();
1536                  break;
1537                }
1538              }
1539            }
1540          } catch (com.google.protobuf.InvalidProtocolBufferException e) {
1541            throw e.setUnfinishedMessage(this);
1542          } catch (java.io.IOException e) {
1543            throw new com.google.protobuf.InvalidProtocolBufferException(
1544                e.getMessage()).setUnfinishedMessage(this);
1545          } finally {
1546            this.unknownFields = unknownFields.build();
1547            makeExtensionsImmutable();
1548          }
1549        }
1550        public static final com.google.protobuf.Descriptors.Descriptor
1551            getDescriptor() {
1552          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_SegmentStateProto_descriptor;
1553        }
1554    
1555        protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
1556            internalGetFieldAccessorTable() {
1557          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_SegmentStateProto_fieldAccessorTable
1558              .ensureFieldAccessorsInitialized(
1559                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder.class);
1560        }
1561    
1562        public static com.google.protobuf.Parser<SegmentStateProto> PARSER =
1563            new com.google.protobuf.AbstractParser<SegmentStateProto>() {
1564          public SegmentStateProto parsePartialFrom(
1565              com.google.protobuf.CodedInputStream input,
1566              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
1567              throws com.google.protobuf.InvalidProtocolBufferException {
1568            return new SegmentStateProto(input, extensionRegistry);
1569          }
1570        };
1571    
1572        @java.lang.Override
1573        public com.google.protobuf.Parser<SegmentStateProto> getParserForType() {
1574          return PARSER;
1575        }
1576    
1577        private int bitField0_;
1578        // required uint64 startTxId = 1;
1579        public static final int STARTTXID_FIELD_NUMBER = 1;
1580        private long startTxId_;
1581        /**
1582         * <code>required uint64 startTxId = 1;</code>
1583         */
1584        public boolean hasStartTxId() {
1585          return ((bitField0_ & 0x00000001) == 0x00000001);
1586        }
1587        /**
1588         * <code>required uint64 startTxId = 1;</code>
1589         */
1590        public long getStartTxId() {
1591          return startTxId_;
1592        }
1593    
1594        // required uint64 endTxId = 2;
1595        public static final int ENDTXID_FIELD_NUMBER = 2;
1596        private long endTxId_;
1597        /**
1598         * <code>required uint64 endTxId = 2;</code>
1599         */
1600        public boolean hasEndTxId() {
1601          return ((bitField0_ & 0x00000002) == 0x00000002);
1602        }
1603        /**
1604         * <code>required uint64 endTxId = 2;</code>
1605         */
1606        public long getEndTxId() {
1607          return endTxId_;
1608        }
1609    
1610        // required bool isInProgress = 3;
1611        public static final int ISINPROGRESS_FIELD_NUMBER = 3;
1612        private boolean isInProgress_;
1613        /**
1614         * <code>required bool isInProgress = 3;</code>
1615         */
1616        public boolean hasIsInProgress() {
1617          return ((bitField0_ & 0x00000004) == 0x00000004);
1618        }
1619        /**
1620         * <code>required bool isInProgress = 3;</code>
1621         */
1622        public boolean getIsInProgress() {
1623          return isInProgress_;
1624        }
1625    
1626        private void initFields() {
1627          startTxId_ = 0L;
1628          endTxId_ = 0L;
1629          isInProgress_ = false;
1630        }
1631        private byte memoizedIsInitialized = -1;
1632        public final boolean isInitialized() {
1633          byte isInitialized = memoizedIsInitialized;
1634          if (isInitialized != -1) return isInitialized == 1;
1635    
1636          if (!hasStartTxId()) {
1637            memoizedIsInitialized = 0;
1638            return false;
1639          }
1640          if (!hasEndTxId()) {
1641            memoizedIsInitialized = 0;
1642            return false;
1643          }
1644          if (!hasIsInProgress()) {
1645            memoizedIsInitialized = 0;
1646            return false;
1647          }
1648          memoizedIsInitialized = 1;
1649          return true;
1650        }
1651    
1652        public void writeTo(com.google.protobuf.CodedOutputStream output)
1653                            throws java.io.IOException {
1654          getSerializedSize();
1655          if (((bitField0_ & 0x00000001) == 0x00000001)) {
1656            output.writeUInt64(1, startTxId_);
1657          }
1658          if (((bitField0_ & 0x00000002) == 0x00000002)) {
1659            output.writeUInt64(2, endTxId_);
1660          }
1661          if (((bitField0_ & 0x00000004) == 0x00000004)) {
1662            output.writeBool(3, isInProgress_);
1663          }
1664          getUnknownFields().writeTo(output);
1665        }
1666    
1667        private int memoizedSerializedSize = -1;
1668        public int getSerializedSize() {
1669          int size = memoizedSerializedSize;
1670          if (size != -1) return size;
1671    
1672          size = 0;
1673          if (((bitField0_ & 0x00000001) == 0x00000001)) {
1674            size += com.google.protobuf.CodedOutputStream
1675              .computeUInt64Size(1, startTxId_);
1676          }
1677          if (((bitField0_ & 0x00000002) == 0x00000002)) {
1678            size += com.google.protobuf.CodedOutputStream
1679              .computeUInt64Size(2, endTxId_);
1680          }
1681          if (((bitField0_ & 0x00000004) == 0x00000004)) {
1682            size += com.google.protobuf.CodedOutputStream
1683              .computeBoolSize(3, isInProgress_);
1684          }
1685          size += getUnknownFields().getSerializedSize();
1686          memoizedSerializedSize = size;
1687          return size;
1688        }
1689    
1690        private static final long serialVersionUID = 0L;
1691        @java.lang.Override
1692        protected java.lang.Object writeReplace()
1693            throws java.io.ObjectStreamException {
1694          return super.writeReplace();
1695        }
1696    
1697        @java.lang.Override
1698        public boolean equals(final java.lang.Object obj) {
1699          if (obj == this) {
1700           return true;
1701          }
1702          if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto)) {
1703            return super.equals(obj);
1704          }
1705          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto) obj;
1706    
1707          boolean result = true;
1708          result = result && (hasStartTxId() == other.hasStartTxId());
1709          if (hasStartTxId()) {
1710            result = result && (getStartTxId()
1711                == other.getStartTxId());
1712          }
1713          result = result && (hasEndTxId() == other.hasEndTxId());
1714          if (hasEndTxId()) {
1715            result = result && (getEndTxId()
1716                == other.getEndTxId());
1717          }
1718          result = result && (hasIsInProgress() == other.hasIsInProgress());
1719          if (hasIsInProgress()) {
1720            result = result && (getIsInProgress()
1721                == other.getIsInProgress());
1722          }
1723          result = result &&
1724              getUnknownFields().equals(other.getUnknownFields());
1725          return result;
1726        }
1727    
1728        private int memoizedHashCode = 0;
1729        @java.lang.Override
1730        public int hashCode() {
1731          if (memoizedHashCode != 0) {
1732            return memoizedHashCode;
1733          }
1734          int hash = 41;
1735          hash = (19 * hash) + getDescriptorForType().hashCode();
1736          if (hasStartTxId()) {
1737            hash = (37 * hash) + STARTTXID_FIELD_NUMBER;
1738            hash = (53 * hash) + hashLong(getStartTxId());
1739          }
1740          if (hasEndTxId()) {
1741            hash = (37 * hash) + ENDTXID_FIELD_NUMBER;
1742            hash = (53 * hash) + hashLong(getEndTxId());
1743          }
1744          if (hasIsInProgress()) {
1745            hash = (37 * hash) + ISINPROGRESS_FIELD_NUMBER;
1746            hash = (53 * hash) + hashBoolean(getIsInProgress());
1747          }
1748          hash = (29 * hash) + getUnknownFields().hashCode();
1749          memoizedHashCode = hash;
1750          return hash;
1751        }
1752    
1753        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto parseFrom(
1754            com.google.protobuf.ByteString data)
1755            throws com.google.protobuf.InvalidProtocolBufferException {
1756          return PARSER.parseFrom(data);
1757        }
1758        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto parseFrom(
1759            com.google.protobuf.ByteString data,
1760            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
1761            throws com.google.protobuf.InvalidProtocolBufferException {
1762          return PARSER.parseFrom(data, extensionRegistry);
1763        }
1764        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto parseFrom(byte[] data)
1765            throws com.google.protobuf.InvalidProtocolBufferException {
1766          return PARSER.parseFrom(data);
1767        }
1768        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto parseFrom(
1769            byte[] data,
1770            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
1771            throws com.google.protobuf.InvalidProtocolBufferException {
1772          return PARSER.parseFrom(data, extensionRegistry);
1773        }
1774        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto parseFrom(java.io.InputStream input)
1775            throws java.io.IOException {
1776          return PARSER.parseFrom(input);
1777        }
1778        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto parseFrom(
1779            java.io.InputStream input,
1780            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
1781            throws java.io.IOException {
1782          return PARSER.parseFrom(input, extensionRegistry);
1783        }
1784        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto parseDelimitedFrom(java.io.InputStream input)
1785            throws java.io.IOException {
1786          return PARSER.parseDelimitedFrom(input);
1787        }
1788        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto parseDelimitedFrom(
1789            java.io.InputStream input,
1790            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
1791            throws java.io.IOException {
1792          return PARSER.parseDelimitedFrom(input, extensionRegistry);
1793        }
1794        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto parseFrom(
1795            com.google.protobuf.CodedInputStream input)
1796            throws java.io.IOException {
1797          return PARSER.parseFrom(input);
1798        }
1799        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto parseFrom(
1800            com.google.protobuf.CodedInputStream input,
1801            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
1802            throws java.io.IOException {
1803          return PARSER.parseFrom(input, extensionRegistry);
1804        }
1805    
1806        public static Builder newBuilder() { return Builder.create(); }
1807        public Builder newBuilderForType() { return newBuilder(); }
1808        public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto prototype) {
1809          return newBuilder().mergeFrom(prototype);
1810        }
1811        public Builder toBuilder() { return newBuilder(this); }
1812    
1813        @java.lang.Override
1814        protected Builder newBuilderForType(
1815            com.google.protobuf.GeneratedMessage.BuilderParent parent) {
1816          Builder builder = new Builder(parent);
1817          return builder;
1818        }
1819        /**
1820         * Protobuf type {@code hadoop.hdfs.SegmentStateProto}
1821         */
1822        public static final class Builder extends
1823            com.google.protobuf.GeneratedMessage.Builder<Builder>
1824           implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder {
1825          public static final com.google.protobuf.Descriptors.Descriptor
1826              getDescriptor() {
1827            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_SegmentStateProto_descriptor;
1828          }
1829    
1830          protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
1831              internalGetFieldAccessorTable() {
1832            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_SegmentStateProto_fieldAccessorTable
1833                .ensureFieldAccessorsInitialized(
1834                    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder.class);
1835          }
1836    
1837          // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.newBuilder()
1838          private Builder() {
1839            maybeForceBuilderInitialization();
1840          }
1841    
1842          private Builder(
1843              com.google.protobuf.GeneratedMessage.BuilderParent parent) {
1844            super(parent);
1845            maybeForceBuilderInitialization();
1846          }
1847          private void maybeForceBuilderInitialization() {
1848            if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
1849            }
1850          }
1851          private static Builder create() {
1852            return new Builder();
1853          }
1854    
1855          public Builder clear() {
1856            super.clear();
1857            startTxId_ = 0L;
1858            bitField0_ = (bitField0_ & ~0x00000001);
1859            endTxId_ = 0L;
1860            bitField0_ = (bitField0_ & ~0x00000002);
1861            isInProgress_ = false;
1862            bitField0_ = (bitField0_ & ~0x00000004);
1863            return this;
1864          }
1865    
1866          public Builder clone() {
1867            return create().mergeFrom(buildPartial());
1868          }
1869    
1870          public com.google.protobuf.Descriptors.Descriptor
1871              getDescriptorForType() {
1872            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_SegmentStateProto_descriptor;
1873          }
1874    
1875          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto getDefaultInstanceForType() {
1876            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance();
1877          }
1878    
1879          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto build() {
1880            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto result = buildPartial();
1881            if (!result.isInitialized()) {
1882              throw newUninitializedMessageException(result);
1883            }
1884            return result;
1885          }
1886    
1887          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto buildPartial() {
1888            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto(this);
1889            int from_bitField0_ = bitField0_;
1890            int to_bitField0_ = 0;
1891            if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
1892              to_bitField0_ |= 0x00000001;
1893            }
1894            result.startTxId_ = startTxId_;
1895            if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
1896              to_bitField0_ |= 0x00000002;
1897            }
1898            result.endTxId_ = endTxId_;
1899            if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
1900              to_bitField0_ |= 0x00000004;
1901            }
1902            result.isInProgress_ = isInProgress_;
1903            result.bitField0_ = to_bitField0_;
1904            onBuilt();
1905            return result;
1906          }
1907    
1908          public Builder mergeFrom(com.google.protobuf.Message other) {
1909            if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto) {
1910              return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto)other);
1911            } else {
1912              super.mergeFrom(other);
1913              return this;
1914            }
1915          }
1916    
1917          public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto other) {
1918            if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance()) return this;
1919            if (other.hasStartTxId()) {
1920              setStartTxId(other.getStartTxId());
1921            }
1922            if (other.hasEndTxId()) {
1923              setEndTxId(other.getEndTxId());
1924            }
1925            if (other.hasIsInProgress()) {
1926              setIsInProgress(other.getIsInProgress());
1927            }
1928            this.mergeUnknownFields(other.getUnknownFields());
1929            return this;
1930          }
1931    
1932          public final boolean isInitialized() {
1933            if (!hasStartTxId()) {
1934              
1935              return false;
1936            }
1937            if (!hasEndTxId()) {
1938              
1939              return false;
1940            }
1941            if (!hasIsInProgress()) {
1942              
1943              return false;
1944            }
1945            return true;
1946          }
1947    
1948          public Builder mergeFrom(
1949              com.google.protobuf.CodedInputStream input,
1950              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
1951              throws java.io.IOException {
1952            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto parsedMessage = null;
1953            try {
1954              parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
1955            } catch (com.google.protobuf.InvalidProtocolBufferException e) {
1956              parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto) e.getUnfinishedMessage();
1957              throw e;
1958            } finally {
1959              if (parsedMessage != null) {
1960                mergeFrom(parsedMessage);
1961              }
1962            }
1963            return this;
1964          }
1965          private int bitField0_;
1966    
1967          // required uint64 startTxId = 1;
1968          private long startTxId_ ;
1969          /**
1970           * <code>required uint64 startTxId = 1;</code>
1971           */
1972          public boolean hasStartTxId() {
1973            return ((bitField0_ & 0x00000001) == 0x00000001);
1974          }
1975          /**
1976           * <code>required uint64 startTxId = 1;</code>
1977           */
1978          public long getStartTxId() {
1979            return startTxId_;
1980          }
1981          /**
1982           * <code>required uint64 startTxId = 1;</code>
1983           */
1984          public Builder setStartTxId(long value) {
1985            bitField0_ |= 0x00000001;
1986            startTxId_ = value;
1987            onChanged();
1988            return this;
1989          }
1990          /**
1991           * <code>required uint64 startTxId = 1;</code>
1992           */
1993          public Builder clearStartTxId() {
1994            bitField0_ = (bitField0_ & ~0x00000001);
1995            startTxId_ = 0L;
1996            onChanged();
1997            return this;
1998          }
1999    
2000          // required uint64 endTxId = 2;
2001          private long endTxId_ ;
2002          /**
2003           * <code>required uint64 endTxId = 2;</code>
2004           */
2005          public boolean hasEndTxId() {
2006            return ((bitField0_ & 0x00000002) == 0x00000002);
2007          }
2008          /**
2009           * <code>required uint64 endTxId = 2;</code>
2010           */
2011          public long getEndTxId() {
2012            return endTxId_;
2013          }
2014          /**
2015           * <code>required uint64 endTxId = 2;</code>
2016           */
2017          public Builder setEndTxId(long value) {
2018            bitField0_ |= 0x00000002;
2019            endTxId_ = value;
2020            onChanged();
2021            return this;
2022          }
2023          /**
2024           * <code>required uint64 endTxId = 2;</code>
2025           */
2026          public Builder clearEndTxId() {
2027            bitField0_ = (bitField0_ & ~0x00000002);
2028            endTxId_ = 0L;
2029            onChanged();
2030            return this;
2031          }
2032    
2033          // required bool isInProgress = 3;
2034          private boolean isInProgress_ ;
2035          /**
2036           * <code>required bool isInProgress = 3;</code>
2037           */
2038          public boolean hasIsInProgress() {
2039            return ((bitField0_ & 0x00000004) == 0x00000004);
2040          }
2041          /**
2042           * <code>required bool isInProgress = 3;</code>
2043           */
2044          public boolean getIsInProgress() {
2045            return isInProgress_;
2046          }
2047          /**
2048           * <code>required bool isInProgress = 3;</code>
2049           */
2050          public Builder setIsInProgress(boolean value) {
2051            bitField0_ |= 0x00000004;
2052            isInProgress_ = value;
2053            onChanged();
2054            return this;
2055          }
2056          /**
2057           * <code>required bool isInProgress = 3;</code>
2058           */
2059          public Builder clearIsInProgress() {
2060            bitField0_ = (bitField0_ & ~0x00000004);
2061            isInProgress_ = false;
2062            onChanged();
2063            return this;
2064          }
2065    
2066          // @@protoc_insertion_point(builder_scope:hadoop.hdfs.SegmentStateProto)
2067        }
2068    
2069        static {
2070          defaultInstance = new SegmentStateProto(true);
2071          defaultInstance.initFields();
2072        }
2073    
2074        // @@protoc_insertion_point(class_scope:hadoop.hdfs.SegmentStateProto)
2075      }
2076    
2077      public interface PersistedRecoveryPaxosDataOrBuilder
2078          extends com.google.protobuf.MessageOrBuilder {
2079    
2080        // required .hadoop.hdfs.SegmentStateProto segmentState = 1;
2081        /**
2082         * <code>required .hadoop.hdfs.SegmentStateProto segmentState = 1;</code>
2083         */
2084        boolean hasSegmentState();
2085        /**
2086         * <code>required .hadoop.hdfs.SegmentStateProto segmentState = 1;</code>
2087         */
2088        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto getSegmentState();
2089        /**
2090         * <code>required .hadoop.hdfs.SegmentStateProto segmentState = 1;</code>
2091         */
2092        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder getSegmentStateOrBuilder();
2093    
2094        // required uint64 acceptedInEpoch = 2;
2095        /**
2096         * <code>required uint64 acceptedInEpoch = 2;</code>
2097         */
2098        boolean hasAcceptedInEpoch();
2099        /**
2100         * <code>required uint64 acceptedInEpoch = 2;</code>
2101         */
2102        long getAcceptedInEpoch();
2103      }
2104      /**
2105       * Protobuf type {@code hadoop.hdfs.PersistedRecoveryPaxosData}
2106       *
2107       * <pre>
2108       **
2109       * The storage format used on local disk for previously
2110       * accepted decisions.
2111       * </pre>
2112       */
2113      public static final class PersistedRecoveryPaxosData extends
2114          com.google.protobuf.GeneratedMessage
2115          implements PersistedRecoveryPaxosDataOrBuilder {
2116        // Use PersistedRecoveryPaxosData.newBuilder() to construct.
2117        private PersistedRecoveryPaxosData(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
2118          super(builder);
2119          this.unknownFields = builder.getUnknownFields();
2120        }
2121        private PersistedRecoveryPaxosData(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
2122    
2123        private static final PersistedRecoveryPaxosData defaultInstance;
2124        public static PersistedRecoveryPaxosData getDefaultInstance() {
2125          return defaultInstance;
2126        }
2127    
2128        public PersistedRecoveryPaxosData getDefaultInstanceForType() {
2129          return defaultInstance;
2130        }
2131    
2132        private final com.google.protobuf.UnknownFieldSet unknownFields;
2133        @java.lang.Override
2134        public final com.google.protobuf.UnknownFieldSet
2135            getUnknownFields() {
2136          return this.unknownFields;
2137        }
2138        private PersistedRecoveryPaxosData(
2139            com.google.protobuf.CodedInputStream input,
2140            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
2141            throws com.google.protobuf.InvalidProtocolBufferException {
2142          initFields();
2143          int mutable_bitField0_ = 0;
2144          com.google.protobuf.UnknownFieldSet.Builder unknownFields =
2145              com.google.protobuf.UnknownFieldSet.newBuilder();
2146          try {
2147            boolean done = false;
2148            while (!done) {
2149              int tag = input.readTag();
2150              switch (tag) {
2151                case 0:
2152                  done = true;
2153                  break;
2154                default: {
2155                  if (!parseUnknownField(input, unknownFields,
2156                                         extensionRegistry, tag)) {
2157                    done = true;
2158                  }
2159                  break;
2160                }
2161                case 10: {
2162                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder subBuilder = null;
2163                  if (((bitField0_ & 0x00000001) == 0x00000001)) {
2164                    subBuilder = segmentState_.toBuilder();
2165                  }
2166                  segmentState_ = input.readMessage(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.PARSER, extensionRegistry);
2167                  if (subBuilder != null) {
2168                    subBuilder.mergeFrom(segmentState_);
2169                    segmentState_ = subBuilder.buildPartial();
2170                  }
2171                  bitField0_ |= 0x00000001;
2172                  break;
2173                }
2174                case 16: {
2175                  bitField0_ |= 0x00000002;
2176                  acceptedInEpoch_ = input.readUInt64();
2177                  break;
2178                }
2179              }
2180            }
2181          } catch (com.google.protobuf.InvalidProtocolBufferException e) {
2182            throw e.setUnfinishedMessage(this);
2183          } catch (java.io.IOException e) {
2184            throw new com.google.protobuf.InvalidProtocolBufferException(
2185                e.getMessage()).setUnfinishedMessage(this);
2186          } finally {
2187            this.unknownFields = unknownFields.build();
2188            makeExtensionsImmutable();
2189          }
2190        }
2191        public static final com.google.protobuf.Descriptors.Descriptor
2192            getDescriptor() {
2193          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_PersistedRecoveryPaxosData_descriptor;
2194        }
2195    
2196        protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
2197            internalGetFieldAccessorTable() {
2198          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_PersistedRecoveryPaxosData_fieldAccessorTable
2199              .ensureFieldAccessorsInitialized(
2200                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData.Builder.class);
2201        }
2202    
2203        public static com.google.protobuf.Parser<PersistedRecoveryPaxosData> PARSER =
2204            new com.google.protobuf.AbstractParser<PersistedRecoveryPaxosData>() {
2205          public PersistedRecoveryPaxosData parsePartialFrom(
2206              com.google.protobuf.CodedInputStream input,
2207              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
2208              throws com.google.protobuf.InvalidProtocolBufferException {
2209            return new PersistedRecoveryPaxosData(input, extensionRegistry);
2210          }
2211        };
2212    
2213        @java.lang.Override
2214        public com.google.protobuf.Parser<PersistedRecoveryPaxosData> getParserForType() {
2215          return PARSER;
2216        }
2217    
2218        private int bitField0_;
2219        // required .hadoop.hdfs.SegmentStateProto segmentState = 1;
2220        public static final int SEGMENTSTATE_FIELD_NUMBER = 1;
2221        private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto segmentState_;
2222        /**
2223         * <code>required .hadoop.hdfs.SegmentStateProto segmentState = 1;</code>
2224         */
2225        public boolean hasSegmentState() {
2226          return ((bitField0_ & 0x00000001) == 0x00000001);
2227        }
2228        /**
2229         * <code>required .hadoop.hdfs.SegmentStateProto segmentState = 1;</code>
2230         */
2231        public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto getSegmentState() {
2232          return segmentState_;
2233        }
2234        /**
2235         * <code>required .hadoop.hdfs.SegmentStateProto segmentState = 1;</code>
2236         */
2237        public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder getSegmentStateOrBuilder() {
2238          return segmentState_;
2239        }
2240    
2241        // required uint64 acceptedInEpoch = 2;
2242        public static final int ACCEPTEDINEPOCH_FIELD_NUMBER = 2;
2243        private long acceptedInEpoch_;
2244        /**
2245         * <code>required uint64 acceptedInEpoch = 2;</code>
2246         */
2247        public boolean hasAcceptedInEpoch() {
2248          return ((bitField0_ & 0x00000002) == 0x00000002);
2249        }
2250        /**
2251         * <code>required uint64 acceptedInEpoch = 2;</code>
2252         */
2253        public long getAcceptedInEpoch() {
2254          return acceptedInEpoch_;
2255        }
2256    
2257        private void initFields() {
2258          segmentState_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance();
2259          acceptedInEpoch_ = 0L;
2260        }
2261        private byte memoizedIsInitialized = -1;
2262        public final boolean isInitialized() {
2263          byte isInitialized = memoizedIsInitialized;
2264          if (isInitialized != -1) return isInitialized == 1;
2265    
2266          if (!hasSegmentState()) {
2267            memoizedIsInitialized = 0;
2268            return false;
2269          }
2270          if (!hasAcceptedInEpoch()) {
2271            memoizedIsInitialized = 0;
2272            return false;
2273          }
2274          if (!getSegmentState().isInitialized()) {
2275            memoizedIsInitialized = 0;
2276            return false;
2277          }
2278          memoizedIsInitialized = 1;
2279          return true;
2280        }
2281    
2282        public void writeTo(com.google.protobuf.CodedOutputStream output)
2283                            throws java.io.IOException {
2284          getSerializedSize();
2285          if (((bitField0_ & 0x00000001) == 0x00000001)) {
2286            output.writeMessage(1, segmentState_);
2287          }
2288          if (((bitField0_ & 0x00000002) == 0x00000002)) {
2289            output.writeUInt64(2, acceptedInEpoch_);
2290          }
2291          getUnknownFields().writeTo(output);
2292        }
2293    
2294        private int memoizedSerializedSize = -1;
2295        public int getSerializedSize() {
2296          int size = memoizedSerializedSize;
2297          if (size != -1) return size;
2298    
2299          size = 0;
2300          if (((bitField0_ & 0x00000001) == 0x00000001)) {
2301            size += com.google.protobuf.CodedOutputStream
2302              .computeMessageSize(1, segmentState_);
2303          }
2304          if (((bitField0_ & 0x00000002) == 0x00000002)) {
2305            size += com.google.protobuf.CodedOutputStream
2306              .computeUInt64Size(2, acceptedInEpoch_);
2307          }
2308          size += getUnknownFields().getSerializedSize();
2309          memoizedSerializedSize = size;
2310          return size;
2311        }
2312    
2313        private static final long serialVersionUID = 0L;
2314        @java.lang.Override
2315        protected java.lang.Object writeReplace()
2316            throws java.io.ObjectStreamException {
2317          return super.writeReplace();
2318        }
2319    
2320        @java.lang.Override
2321        public boolean equals(final java.lang.Object obj) {
2322          if (obj == this) {
2323           return true;
2324          }
2325          if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData)) {
2326            return super.equals(obj);
2327          }
2328          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData) obj;
2329    
2330          boolean result = true;
2331          result = result && (hasSegmentState() == other.hasSegmentState());
2332          if (hasSegmentState()) {
2333            result = result && getSegmentState()
2334                .equals(other.getSegmentState());
2335          }
2336          result = result && (hasAcceptedInEpoch() == other.hasAcceptedInEpoch());
2337          if (hasAcceptedInEpoch()) {
2338            result = result && (getAcceptedInEpoch()
2339                == other.getAcceptedInEpoch());
2340          }
2341          result = result &&
2342              getUnknownFields().equals(other.getUnknownFields());
2343          return result;
2344        }
2345    
2346        private int memoizedHashCode = 0;
2347        @java.lang.Override
2348        public int hashCode() {
2349          if (memoizedHashCode != 0) {
2350            return memoizedHashCode;
2351          }
2352          int hash = 41;
2353          hash = (19 * hash) + getDescriptorForType().hashCode();
2354          if (hasSegmentState()) {
2355            hash = (37 * hash) + SEGMENTSTATE_FIELD_NUMBER;
2356            hash = (53 * hash) + getSegmentState().hashCode();
2357          }
2358          if (hasAcceptedInEpoch()) {
2359            hash = (37 * hash) + ACCEPTEDINEPOCH_FIELD_NUMBER;
2360            hash = (53 * hash) + hashLong(getAcceptedInEpoch());
2361          }
2362          hash = (29 * hash) + getUnknownFields().hashCode();
2363          memoizedHashCode = hash;
2364          return hash;
2365        }
2366    
2367        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData parseFrom(
2368            com.google.protobuf.ByteString data)
2369            throws com.google.protobuf.InvalidProtocolBufferException {
2370          return PARSER.parseFrom(data);
2371        }
2372        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData parseFrom(
2373            com.google.protobuf.ByteString data,
2374            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
2375            throws com.google.protobuf.InvalidProtocolBufferException {
2376          return PARSER.parseFrom(data, extensionRegistry);
2377        }
2378        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData parseFrom(byte[] data)
2379            throws com.google.protobuf.InvalidProtocolBufferException {
2380          return PARSER.parseFrom(data);
2381        }
2382        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData parseFrom(
2383            byte[] data,
2384            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
2385            throws com.google.protobuf.InvalidProtocolBufferException {
2386          return PARSER.parseFrom(data, extensionRegistry);
2387        }
2388        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData parseFrom(java.io.InputStream input)
2389            throws java.io.IOException {
2390          return PARSER.parseFrom(input);
2391        }
2392        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData parseFrom(
2393            java.io.InputStream input,
2394            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
2395            throws java.io.IOException {
2396          return PARSER.parseFrom(input, extensionRegistry);
2397        }
2398        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData parseDelimitedFrom(java.io.InputStream input)
2399            throws java.io.IOException {
2400          return PARSER.parseDelimitedFrom(input);
2401        }
2402        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData parseDelimitedFrom(
2403            java.io.InputStream input,
2404            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
2405            throws java.io.IOException {
2406          return PARSER.parseDelimitedFrom(input, extensionRegistry);
2407        }
2408        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData parseFrom(
2409            com.google.protobuf.CodedInputStream input)
2410            throws java.io.IOException {
2411          return PARSER.parseFrom(input);
2412        }
2413        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData parseFrom(
2414            com.google.protobuf.CodedInputStream input,
2415            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
2416            throws java.io.IOException {
2417          return PARSER.parseFrom(input, extensionRegistry);
2418        }
2419    
2420        public static Builder newBuilder() { return Builder.create(); }
2421        public Builder newBuilderForType() { return newBuilder(); }
2422        public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData prototype) {
2423          return newBuilder().mergeFrom(prototype);
2424        }
2425        public Builder toBuilder() { return newBuilder(this); }
2426    
2427        @java.lang.Override
2428        protected Builder newBuilderForType(
2429            com.google.protobuf.GeneratedMessage.BuilderParent parent) {
2430          Builder builder = new Builder(parent);
2431          return builder;
2432        }
2433        /**
2434         * Protobuf type {@code hadoop.hdfs.PersistedRecoveryPaxosData}
2435         *
2436         * <pre>
2437         **
2438         * The storage format used on local disk for previously
2439         * accepted decisions.
2440         * </pre>
2441         */
2442        public static final class Builder extends
2443            com.google.protobuf.GeneratedMessage.Builder<Builder>
2444           implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosDataOrBuilder {
2445          public static final com.google.protobuf.Descriptors.Descriptor
2446              getDescriptor() {
2447            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_PersistedRecoveryPaxosData_descriptor;
2448          }
2449    
2450          protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
2451              internalGetFieldAccessorTable() {
2452            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_PersistedRecoveryPaxosData_fieldAccessorTable
2453                .ensureFieldAccessorsInitialized(
2454                    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData.Builder.class);
2455          }
2456    
2457          // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData.newBuilder()
2458          private Builder() {
2459            maybeForceBuilderInitialization();
2460          }
2461    
2462          private Builder(
2463              com.google.protobuf.GeneratedMessage.BuilderParent parent) {
2464            super(parent);
2465            maybeForceBuilderInitialization();
2466          }
2467          private void maybeForceBuilderInitialization() {
2468            if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
2469              getSegmentStateFieldBuilder();
2470            }
2471          }
2472          private static Builder create() {
2473            return new Builder();
2474          }
2475    
2476          public Builder clear() {
2477            super.clear();
2478            if (segmentStateBuilder_ == null) {
2479              segmentState_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance();
2480            } else {
2481              segmentStateBuilder_.clear();
2482            }
2483            bitField0_ = (bitField0_ & ~0x00000001);
2484            acceptedInEpoch_ = 0L;
2485            bitField0_ = (bitField0_ & ~0x00000002);
2486            return this;
2487          }
2488    
2489          public Builder clone() {
2490            return create().mergeFrom(buildPartial());
2491          }
2492    
2493          public com.google.protobuf.Descriptors.Descriptor
2494              getDescriptorForType() {
2495            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_PersistedRecoveryPaxosData_descriptor;
2496          }
2497    
2498          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData getDefaultInstanceForType() {
2499            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData.getDefaultInstance();
2500          }
2501    
2502          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData build() {
2503            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData result = buildPartial();
2504            if (!result.isInitialized()) {
2505              throw newUninitializedMessageException(result);
2506            }
2507            return result;
2508          }
2509    
2510          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData buildPartial() {
2511            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData(this);
2512            int from_bitField0_ = bitField0_;
2513            int to_bitField0_ = 0;
2514            if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
2515              to_bitField0_ |= 0x00000001;
2516            }
2517            if (segmentStateBuilder_ == null) {
2518              result.segmentState_ = segmentState_;
2519            } else {
2520              result.segmentState_ = segmentStateBuilder_.build();
2521            }
2522            if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
2523              to_bitField0_ |= 0x00000002;
2524            }
2525            result.acceptedInEpoch_ = acceptedInEpoch_;
2526            result.bitField0_ = to_bitField0_;
2527            onBuilt();
2528            return result;
2529          }
2530    
2531          public Builder mergeFrom(com.google.protobuf.Message other) {
2532            if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData) {
2533              return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData)other);
2534            } else {
2535              super.mergeFrom(other);
2536              return this;
2537            }
2538          }
2539    
2540          public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData other) {
2541            if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData.getDefaultInstance()) return this;
2542            if (other.hasSegmentState()) {
2543              mergeSegmentState(other.getSegmentState());
2544            }
2545            if (other.hasAcceptedInEpoch()) {
2546              setAcceptedInEpoch(other.getAcceptedInEpoch());
2547            }
2548            this.mergeUnknownFields(other.getUnknownFields());
2549            return this;
2550          }
2551    
2552          public final boolean isInitialized() {
2553            if (!hasSegmentState()) {
2554              
2555              return false;
2556            }
2557            if (!hasAcceptedInEpoch()) {
2558              
2559              return false;
2560            }
2561            if (!getSegmentState().isInitialized()) {
2562              
2563              return false;
2564            }
2565            return true;
2566          }
2567    
2568          public Builder mergeFrom(
2569              com.google.protobuf.CodedInputStream input,
2570              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
2571              throws java.io.IOException {
2572            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData parsedMessage = null;
2573            try {
2574              parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
2575            } catch (com.google.protobuf.InvalidProtocolBufferException e) {
2576              parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData) e.getUnfinishedMessage();
2577              throw e;
2578            } finally {
2579              if (parsedMessage != null) {
2580                mergeFrom(parsedMessage);
2581              }
2582            }
2583            return this;
2584          }
2585          private int bitField0_;
2586    
2587          // required .hadoop.hdfs.SegmentStateProto segmentState = 1;
2588          private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto segmentState_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance();
2589          private com.google.protobuf.SingleFieldBuilder<
2590              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder> segmentStateBuilder_;
2591          /**
2592           * <code>required .hadoop.hdfs.SegmentStateProto segmentState = 1;</code>
2593           */
2594          public boolean hasSegmentState() {
2595            return ((bitField0_ & 0x00000001) == 0x00000001);
2596          }
2597          /**
2598           * <code>required .hadoop.hdfs.SegmentStateProto segmentState = 1;</code>
2599           */
2600          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto getSegmentState() {
2601            if (segmentStateBuilder_ == null) {
2602              return segmentState_;
2603            } else {
2604              return segmentStateBuilder_.getMessage();
2605            }
2606          }
2607          /**
2608           * <code>required .hadoop.hdfs.SegmentStateProto segmentState = 1;</code>
2609           */
2610          public Builder setSegmentState(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto value) {
2611            if (segmentStateBuilder_ == null) {
2612              if (value == null) {
2613                throw new NullPointerException();
2614              }
2615              segmentState_ = value;
2616              onChanged();
2617            } else {
2618              segmentStateBuilder_.setMessage(value);
2619            }
2620            bitField0_ |= 0x00000001;
2621            return this;
2622          }
2623          /**
2624           * <code>required .hadoop.hdfs.SegmentStateProto segmentState = 1;</code>
2625           */
2626          public Builder setSegmentState(
2627              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder builderForValue) {
2628            if (segmentStateBuilder_ == null) {
2629              segmentState_ = builderForValue.build();
2630              onChanged();
2631            } else {
2632              segmentStateBuilder_.setMessage(builderForValue.build());
2633            }
2634            bitField0_ |= 0x00000001;
2635            return this;
2636          }
2637          /**
2638           * <code>required .hadoop.hdfs.SegmentStateProto segmentState = 1;</code>
2639           */
2640          public Builder mergeSegmentState(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto value) {
2641            if (segmentStateBuilder_ == null) {
2642              if (((bitField0_ & 0x00000001) == 0x00000001) &&
2643                  segmentState_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance()) {
2644                segmentState_ =
2645                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.newBuilder(segmentState_).mergeFrom(value).buildPartial();
2646              } else {
2647                segmentState_ = value;
2648              }
2649              onChanged();
2650            } else {
2651              segmentStateBuilder_.mergeFrom(value);
2652            }
2653            bitField0_ |= 0x00000001;
2654            return this;
2655          }
2656          /**
2657           * <code>required .hadoop.hdfs.SegmentStateProto segmentState = 1;</code>
2658           */
2659          public Builder clearSegmentState() {
2660            if (segmentStateBuilder_ == null) {
2661              segmentState_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance();
2662              onChanged();
2663            } else {
2664              segmentStateBuilder_.clear();
2665            }
2666            bitField0_ = (bitField0_ & ~0x00000001);
2667            return this;
2668          }
2669          /**
2670           * <code>required .hadoop.hdfs.SegmentStateProto segmentState = 1;</code>
2671           */
2672          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder getSegmentStateBuilder() {
2673            bitField0_ |= 0x00000001;
2674            onChanged();
2675            return getSegmentStateFieldBuilder().getBuilder();
2676          }
2677          /**
2678           * <code>required .hadoop.hdfs.SegmentStateProto segmentState = 1;</code>
2679           */
2680          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder getSegmentStateOrBuilder() {
2681            if (segmentStateBuilder_ != null) {
2682              return segmentStateBuilder_.getMessageOrBuilder();
2683            } else {
2684              return segmentState_;
2685            }
2686          }
2687          /**
2688           * <code>required .hadoop.hdfs.SegmentStateProto segmentState = 1;</code>
2689           */
2690          private com.google.protobuf.SingleFieldBuilder<
2691              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder> 
2692              getSegmentStateFieldBuilder() {
2693            if (segmentStateBuilder_ == null) {
2694              segmentStateBuilder_ = new com.google.protobuf.SingleFieldBuilder<
2695                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder>(
2696                      segmentState_,
2697                      getParentForChildren(),
2698                      isClean());
2699              segmentState_ = null;
2700            }
2701            return segmentStateBuilder_;
2702          }
2703    
2704          // required uint64 acceptedInEpoch = 2;
2705          private long acceptedInEpoch_ ;
2706          /**
2707           * <code>required uint64 acceptedInEpoch = 2;</code>
2708           */
2709          public boolean hasAcceptedInEpoch() {
2710            return ((bitField0_ & 0x00000002) == 0x00000002);
2711          }
2712          /**
2713           * <code>required uint64 acceptedInEpoch = 2;</code>
2714           */
2715          public long getAcceptedInEpoch() {
2716            return acceptedInEpoch_;
2717          }
2718          /**
2719           * <code>required uint64 acceptedInEpoch = 2;</code>
2720           */
2721          public Builder setAcceptedInEpoch(long value) {
2722            bitField0_ |= 0x00000002;
2723            acceptedInEpoch_ = value;
2724            onChanged();
2725            return this;
2726          }
2727          /**
2728           * <code>required uint64 acceptedInEpoch = 2;</code>
2729           */
2730          public Builder clearAcceptedInEpoch() {
2731            bitField0_ = (bitField0_ & ~0x00000002);
2732            acceptedInEpoch_ = 0L;
2733            onChanged();
2734            return this;
2735          }
2736    
2737          // @@protoc_insertion_point(builder_scope:hadoop.hdfs.PersistedRecoveryPaxosData)
2738        }
2739    
2740        static {
2741          defaultInstance = new PersistedRecoveryPaxosData(true);
2742          defaultInstance.initFields();
2743        }
2744    
2745        // @@protoc_insertion_point(class_scope:hadoop.hdfs.PersistedRecoveryPaxosData)
2746      }
2747    
2748      public interface JournalRequestProtoOrBuilder
2749          extends com.google.protobuf.MessageOrBuilder {
2750    
2751        // required .hadoop.hdfs.RequestInfoProto reqInfo = 1;
2752        /**
2753         * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
2754         */
2755        boolean hasReqInfo();
2756        /**
2757         * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
2758         */
2759        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo();
2760        /**
2761         * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
2762         */
2763        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder();
2764    
2765        // required uint64 firstTxnId = 2;
2766        /**
2767         * <code>required uint64 firstTxnId = 2;</code>
2768         */
2769        boolean hasFirstTxnId();
2770        /**
2771         * <code>required uint64 firstTxnId = 2;</code>
2772         */
2773        long getFirstTxnId();
2774    
2775        // required uint32 numTxns = 3;
2776        /**
2777         * <code>required uint32 numTxns = 3;</code>
2778         */
2779        boolean hasNumTxns();
2780        /**
2781         * <code>required uint32 numTxns = 3;</code>
2782         */
2783        int getNumTxns();
2784    
2785        // required bytes records = 4;
2786        /**
2787         * <code>required bytes records = 4;</code>
2788         */
2789        boolean hasRecords();
2790        /**
2791         * <code>required bytes records = 4;</code>
2792         */
2793        com.google.protobuf.ByteString getRecords();
2794    
2795        // required uint64 segmentTxnId = 5;
2796        /**
2797         * <code>required uint64 segmentTxnId = 5;</code>
2798         */
2799        boolean hasSegmentTxnId();
2800        /**
2801         * <code>required uint64 segmentTxnId = 5;</code>
2802         */
2803        long getSegmentTxnId();
2804      }
2805      /**
2806       * Protobuf type {@code hadoop.hdfs.JournalRequestProto}
2807       */
2808      public static final class JournalRequestProto extends
2809          com.google.protobuf.GeneratedMessage
2810          implements JournalRequestProtoOrBuilder {
2811        // Use JournalRequestProto.newBuilder() to construct.
2812        private JournalRequestProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
2813          super(builder);
2814          this.unknownFields = builder.getUnknownFields();
2815        }
2816        private JournalRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
2817    
2818        private static final JournalRequestProto defaultInstance;
2819        public static JournalRequestProto getDefaultInstance() {
2820          return defaultInstance;
2821        }
2822    
2823        public JournalRequestProto getDefaultInstanceForType() {
2824          return defaultInstance;
2825        }
2826    
2827        private final com.google.protobuf.UnknownFieldSet unknownFields;
2828        @java.lang.Override
2829        public final com.google.protobuf.UnknownFieldSet
2830            getUnknownFields() {
2831          return this.unknownFields;
2832        }
2833        private JournalRequestProto(
2834            com.google.protobuf.CodedInputStream input,
2835            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
2836            throws com.google.protobuf.InvalidProtocolBufferException {
2837          initFields();
2838          int mutable_bitField0_ = 0;
2839          com.google.protobuf.UnknownFieldSet.Builder unknownFields =
2840              com.google.protobuf.UnknownFieldSet.newBuilder();
2841          try {
2842            boolean done = false;
2843            while (!done) {
2844              int tag = input.readTag();
2845              switch (tag) {
2846                case 0:
2847                  done = true;
2848                  break;
2849                default: {
2850                  if (!parseUnknownField(input, unknownFields,
2851                                         extensionRegistry, tag)) {
2852                    done = true;
2853                  }
2854                  break;
2855                }
2856                case 10: {
2857                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder subBuilder = null;
2858                  if (((bitField0_ & 0x00000001) == 0x00000001)) {
2859                    subBuilder = reqInfo_.toBuilder();
2860                  }
2861                  reqInfo_ = input.readMessage(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.PARSER, extensionRegistry);
2862                  if (subBuilder != null) {
2863                    subBuilder.mergeFrom(reqInfo_);
2864                    reqInfo_ = subBuilder.buildPartial();
2865                  }
2866                  bitField0_ |= 0x00000001;
2867                  break;
2868                }
2869                case 16: {
2870                  bitField0_ |= 0x00000002;
2871                  firstTxnId_ = input.readUInt64();
2872                  break;
2873                }
2874                case 24: {
2875                  bitField0_ |= 0x00000004;
2876                  numTxns_ = input.readUInt32();
2877                  break;
2878                }
2879                case 34: {
2880                  bitField0_ |= 0x00000008;
2881                  records_ = input.readBytes();
2882                  break;
2883                }
2884                case 40: {
2885                  bitField0_ |= 0x00000010;
2886                  segmentTxnId_ = input.readUInt64();
2887                  break;
2888                }
2889              }
2890            }
2891          } catch (com.google.protobuf.InvalidProtocolBufferException e) {
2892            throw e.setUnfinishedMessage(this);
2893          } catch (java.io.IOException e) {
2894            throw new com.google.protobuf.InvalidProtocolBufferException(
2895                e.getMessage()).setUnfinishedMessage(this);
2896          } finally {
2897            this.unknownFields = unknownFields.build();
2898            makeExtensionsImmutable();
2899          }
2900        }
2901        public static final com.google.protobuf.Descriptors.Descriptor
2902            getDescriptor() {
2903          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_JournalRequestProto_descriptor;
2904        }
2905    
2906        protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
2907            internalGetFieldAccessorTable() {
2908          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_JournalRequestProto_fieldAccessorTable
2909              .ensureFieldAccessorsInitialized(
2910                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto.Builder.class);
2911        }
2912    
2913        public static com.google.protobuf.Parser<JournalRequestProto> PARSER =
2914            new com.google.protobuf.AbstractParser<JournalRequestProto>() {
2915          public JournalRequestProto parsePartialFrom(
2916              com.google.protobuf.CodedInputStream input,
2917              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
2918              throws com.google.protobuf.InvalidProtocolBufferException {
2919            return new JournalRequestProto(input, extensionRegistry);
2920          }
2921        };
2922    
2923        @java.lang.Override
2924        public com.google.protobuf.Parser<JournalRequestProto> getParserForType() {
2925          return PARSER;
2926        }
2927    
2928        private int bitField0_;
2929        // required .hadoop.hdfs.RequestInfoProto reqInfo = 1;
2930        public static final int REQINFO_FIELD_NUMBER = 1;
2931        private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto reqInfo_;
2932        /**
2933         * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
2934         */
2935        public boolean hasReqInfo() {
2936          return ((bitField0_ & 0x00000001) == 0x00000001);
2937        }
2938        /**
2939         * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
2940         */
2941        public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo() {
2942          return reqInfo_;
2943        }
2944        /**
2945         * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
2946         */
2947        public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder() {
2948          return reqInfo_;
2949        }
2950    
2951        // required uint64 firstTxnId = 2;
2952        public static final int FIRSTTXNID_FIELD_NUMBER = 2;
2953        private long firstTxnId_;
2954        /**
2955         * <code>required uint64 firstTxnId = 2;</code>
2956         */
2957        public boolean hasFirstTxnId() {
2958          return ((bitField0_ & 0x00000002) == 0x00000002);
2959        }
2960        /**
2961         * <code>required uint64 firstTxnId = 2;</code>
2962         */
2963        public long getFirstTxnId() {
2964          return firstTxnId_;
2965        }
2966    
2967        // required uint32 numTxns = 3;
2968        public static final int NUMTXNS_FIELD_NUMBER = 3;
2969        private int numTxns_;
2970        /**
2971         * <code>required uint32 numTxns = 3;</code>
2972         */
2973        public boolean hasNumTxns() {
2974          return ((bitField0_ & 0x00000004) == 0x00000004);
2975        }
2976        /**
2977         * <code>required uint32 numTxns = 3;</code>
2978         */
2979        public int getNumTxns() {
2980          return numTxns_;
2981        }
2982    
2983        // required bytes records = 4;
2984        public static final int RECORDS_FIELD_NUMBER = 4;
2985        private com.google.protobuf.ByteString records_;
2986        /**
2987         * <code>required bytes records = 4;</code>
2988         */
2989        public boolean hasRecords() {
2990          return ((bitField0_ & 0x00000008) == 0x00000008);
2991        }
2992        /**
2993         * <code>required bytes records = 4;</code>
2994         */
2995        public com.google.protobuf.ByteString getRecords() {
2996          return records_;
2997        }
2998    
2999        // required uint64 segmentTxnId = 5;
3000        public static final int SEGMENTTXNID_FIELD_NUMBER = 5;
3001        private long segmentTxnId_;
3002        /**
3003         * <code>required uint64 segmentTxnId = 5;</code>
3004         */
3005        public boolean hasSegmentTxnId() {
3006          return ((bitField0_ & 0x00000010) == 0x00000010);
3007        }
3008        /**
3009         * <code>required uint64 segmentTxnId = 5;</code>
3010         */
3011        public long getSegmentTxnId() {
3012          return segmentTxnId_;
3013        }
3014    
3015        private void initFields() {
3016          reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
3017          firstTxnId_ = 0L;
3018          numTxns_ = 0;
3019          records_ = com.google.protobuf.ByteString.EMPTY;
3020          segmentTxnId_ = 0L;
3021        }
3022        private byte memoizedIsInitialized = -1;
3023        public final boolean isInitialized() {
3024          byte isInitialized = memoizedIsInitialized;
3025          if (isInitialized != -1) return isInitialized == 1;
3026    
3027          if (!hasReqInfo()) {
3028            memoizedIsInitialized = 0;
3029            return false;
3030          }
3031          if (!hasFirstTxnId()) {
3032            memoizedIsInitialized = 0;
3033            return false;
3034          }
3035          if (!hasNumTxns()) {
3036            memoizedIsInitialized = 0;
3037            return false;
3038          }
3039          if (!hasRecords()) {
3040            memoizedIsInitialized = 0;
3041            return false;
3042          }
3043          if (!hasSegmentTxnId()) {
3044            memoizedIsInitialized = 0;
3045            return false;
3046          }
3047          if (!getReqInfo().isInitialized()) {
3048            memoizedIsInitialized = 0;
3049            return false;
3050          }
3051          memoizedIsInitialized = 1;
3052          return true;
3053        }
3054    
3055        public void writeTo(com.google.protobuf.CodedOutputStream output)
3056                            throws java.io.IOException {
3057          getSerializedSize();
3058          if (((bitField0_ & 0x00000001) == 0x00000001)) {
3059            output.writeMessage(1, reqInfo_);
3060          }
3061          if (((bitField0_ & 0x00000002) == 0x00000002)) {
3062            output.writeUInt64(2, firstTxnId_);
3063          }
3064          if (((bitField0_ & 0x00000004) == 0x00000004)) {
3065            output.writeUInt32(3, numTxns_);
3066          }
3067          if (((bitField0_ & 0x00000008) == 0x00000008)) {
3068            output.writeBytes(4, records_);
3069          }
3070          if (((bitField0_ & 0x00000010) == 0x00000010)) {
3071            output.writeUInt64(5, segmentTxnId_);
3072          }
3073          getUnknownFields().writeTo(output);
3074        }
3075    
3076        private int memoizedSerializedSize = -1;
3077        public int getSerializedSize() {
3078          int size = memoizedSerializedSize;
3079          if (size != -1) return size;
3080    
3081          size = 0;
3082          if (((bitField0_ & 0x00000001) == 0x00000001)) {
3083            size += com.google.protobuf.CodedOutputStream
3084              .computeMessageSize(1, reqInfo_);
3085          }
3086          if (((bitField0_ & 0x00000002) == 0x00000002)) {
3087            size += com.google.protobuf.CodedOutputStream
3088              .computeUInt64Size(2, firstTxnId_);
3089          }
3090          if (((bitField0_ & 0x00000004) == 0x00000004)) {
3091            size += com.google.protobuf.CodedOutputStream
3092              .computeUInt32Size(3, numTxns_);
3093          }
3094          if (((bitField0_ & 0x00000008) == 0x00000008)) {
3095            size += com.google.protobuf.CodedOutputStream
3096              .computeBytesSize(4, records_);
3097          }
3098          if (((bitField0_ & 0x00000010) == 0x00000010)) {
3099            size += com.google.protobuf.CodedOutputStream
3100              .computeUInt64Size(5, segmentTxnId_);
3101          }
3102          size += getUnknownFields().getSerializedSize();
3103          memoizedSerializedSize = size;
3104          return size;
3105        }
3106    
3107        private static final long serialVersionUID = 0L;
3108        @java.lang.Override
3109        protected java.lang.Object writeReplace()
3110            throws java.io.ObjectStreamException {
3111          return super.writeReplace();
3112        }
3113    
3114        @java.lang.Override
3115        public boolean equals(final java.lang.Object obj) {
3116          if (obj == this) {
3117           return true;
3118          }
3119          if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto)) {
3120            return super.equals(obj);
3121          }
3122          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto) obj;
3123    
3124          boolean result = true;
3125          result = result && (hasReqInfo() == other.hasReqInfo());
3126          if (hasReqInfo()) {
3127            result = result && getReqInfo()
3128                .equals(other.getReqInfo());
3129          }
3130          result = result && (hasFirstTxnId() == other.hasFirstTxnId());
3131          if (hasFirstTxnId()) {
3132            result = result && (getFirstTxnId()
3133                == other.getFirstTxnId());
3134          }
3135          result = result && (hasNumTxns() == other.hasNumTxns());
3136          if (hasNumTxns()) {
3137            result = result && (getNumTxns()
3138                == other.getNumTxns());
3139          }
3140          result = result && (hasRecords() == other.hasRecords());
3141          if (hasRecords()) {
3142            result = result && getRecords()
3143                .equals(other.getRecords());
3144          }
3145          result = result && (hasSegmentTxnId() == other.hasSegmentTxnId());
3146          if (hasSegmentTxnId()) {
3147            result = result && (getSegmentTxnId()
3148                == other.getSegmentTxnId());
3149          }
3150          result = result &&
3151              getUnknownFields().equals(other.getUnknownFields());
3152          return result;
3153        }
3154    
3155        private int memoizedHashCode = 0;
3156        @java.lang.Override
3157        public int hashCode() {
3158          if (memoizedHashCode != 0) {
3159            return memoizedHashCode;
3160          }
3161          int hash = 41;
3162          hash = (19 * hash) + getDescriptorForType().hashCode();
3163          if (hasReqInfo()) {
3164            hash = (37 * hash) + REQINFO_FIELD_NUMBER;
3165            hash = (53 * hash) + getReqInfo().hashCode();
3166          }
3167          if (hasFirstTxnId()) {
3168            hash = (37 * hash) + FIRSTTXNID_FIELD_NUMBER;
3169            hash = (53 * hash) + hashLong(getFirstTxnId());
3170          }
3171          if (hasNumTxns()) {
3172            hash = (37 * hash) + NUMTXNS_FIELD_NUMBER;
3173            hash = (53 * hash) + getNumTxns();
3174          }
3175          if (hasRecords()) {
3176            hash = (37 * hash) + RECORDS_FIELD_NUMBER;
3177            hash = (53 * hash) + getRecords().hashCode();
3178          }
3179          if (hasSegmentTxnId()) {
3180            hash = (37 * hash) + SEGMENTTXNID_FIELD_NUMBER;
3181            hash = (53 * hash) + hashLong(getSegmentTxnId());
3182          }
3183          hash = (29 * hash) + getUnknownFields().hashCode();
3184          memoizedHashCode = hash;
3185          return hash;
3186        }
3187    
3188        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto parseFrom(
3189            com.google.protobuf.ByteString data)
3190            throws com.google.protobuf.InvalidProtocolBufferException {
3191          return PARSER.parseFrom(data);
3192        }
3193        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto parseFrom(
3194            com.google.protobuf.ByteString data,
3195            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
3196            throws com.google.protobuf.InvalidProtocolBufferException {
3197          return PARSER.parseFrom(data, extensionRegistry);
3198        }
3199        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto parseFrom(byte[] data)
3200            throws com.google.protobuf.InvalidProtocolBufferException {
3201          return PARSER.parseFrom(data);
3202        }
3203        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto parseFrom(
3204            byte[] data,
3205            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
3206            throws com.google.protobuf.InvalidProtocolBufferException {
3207          return PARSER.parseFrom(data, extensionRegistry);
3208        }
3209        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto parseFrom(java.io.InputStream input)
3210            throws java.io.IOException {
3211          return PARSER.parseFrom(input);
3212        }
3213        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto parseFrom(
3214            java.io.InputStream input,
3215            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
3216            throws java.io.IOException {
3217          return PARSER.parseFrom(input, extensionRegistry);
3218        }
3219        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto parseDelimitedFrom(java.io.InputStream input)
3220            throws java.io.IOException {
3221          return PARSER.parseDelimitedFrom(input);
3222        }
3223        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto parseDelimitedFrom(
3224            java.io.InputStream input,
3225            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
3226            throws java.io.IOException {
3227          return PARSER.parseDelimitedFrom(input, extensionRegistry);
3228        }
3229        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto parseFrom(
3230            com.google.protobuf.CodedInputStream input)
3231            throws java.io.IOException {
3232          return PARSER.parseFrom(input);
3233        }
3234        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto parseFrom(
3235            com.google.protobuf.CodedInputStream input,
3236            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
3237            throws java.io.IOException {
3238          return PARSER.parseFrom(input, extensionRegistry);
3239        }
3240    
3241        public static Builder newBuilder() { return Builder.create(); }
3242        public Builder newBuilderForType() { return newBuilder(); }
3243        public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto prototype) {
3244          return newBuilder().mergeFrom(prototype);
3245        }
3246        public Builder toBuilder() { return newBuilder(this); }
3247    
3248        @java.lang.Override
3249        protected Builder newBuilderForType(
3250            com.google.protobuf.GeneratedMessage.BuilderParent parent) {
3251          Builder builder = new Builder(parent);
3252          return builder;
3253        }
3254        /**
3255         * Protobuf type {@code hadoop.hdfs.JournalRequestProto}
3256         */
3257        public static final class Builder extends
3258            com.google.protobuf.GeneratedMessage.Builder<Builder>
3259           implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProtoOrBuilder {
3260          public static final com.google.protobuf.Descriptors.Descriptor
3261              getDescriptor() {
3262            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_JournalRequestProto_descriptor;
3263          }
3264    
3265          protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
3266              internalGetFieldAccessorTable() {
3267            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_JournalRequestProto_fieldAccessorTable
3268                .ensureFieldAccessorsInitialized(
3269                    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto.Builder.class);
3270          }
3271    
3272          // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto.newBuilder()
3273          private Builder() {
3274            maybeForceBuilderInitialization();
3275          }
3276    
3277          private Builder(
3278              com.google.protobuf.GeneratedMessage.BuilderParent parent) {
3279            super(parent);
3280            maybeForceBuilderInitialization();
3281          }
3282          private void maybeForceBuilderInitialization() {
3283            if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
3284              getReqInfoFieldBuilder();
3285            }
3286          }
3287          private static Builder create() {
3288            return new Builder();
3289          }
3290    
3291          public Builder clear() {
3292            super.clear();
3293            if (reqInfoBuilder_ == null) {
3294              reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
3295            } else {
3296              reqInfoBuilder_.clear();
3297            }
3298            bitField0_ = (bitField0_ & ~0x00000001);
3299            firstTxnId_ = 0L;
3300            bitField0_ = (bitField0_ & ~0x00000002);
3301            numTxns_ = 0;
3302            bitField0_ = (bitField0_ & ~0x00000004);
3303            records_ = com.google.protobuf.ByteString.EMPTY;
3304            bitField0_ = (bitField0_ & ~0x00000008);
3305            segmentTxnId_ = 0L;
3306            bitField0_ = (bitField0_ & ~0x00000010);
3307            return this;
3308          }
3309    
3310          public Builder clone() {
3311            return create().mergeFrom(buildPartial());
3312          }
3313    
3314          public com.google.protobuf.Descriptors.Descriptor
3315              getDescriptorForType() {
3316            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_JournalRequestProto_descriptor;
3317          }
3318    
3319          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto getDefaultInstanceForType() {
3320            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto.getDefaultInstance();
3321          }
3322    
3323          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto build() {
3324            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto result = buildPartial();
3325            if (!result.isInitialized()) {
3326              throw newUninitializedMessageException(result);
3327            }
3328            return result;
3329          }
3330    
3331          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto buildPartial() {
3332            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto(this);
3333            int from_bitField0_ = bitField0_;
3334            int to_bitField0_ = 0;
3335            if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
3336              to_bitField0_ |= 0x00000001;
3337            }
3338            if (reqInfoBuilder_ == null) {
3339              result.reqInfo_ = reqInfo_;
3340            } else {
3341              result.reqInfo_ = reqInfoBuilder_.build();
3342            }
3343            if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
3344              to_bitField0_ |= 0x00000002;
3345            }
3346            result.firstTxnId_ = firstTxnId_;
3347            if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
3348              to_bitField0_ |= 0x00000004;
3349            }
3350            result.numTxns_ = numTxns_;
3351            if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
3352              to_bitField0_ |= 0x00000008;
3353            }
3354            result.records_ = records_;
3355            if (((from_bitField0_ & 0x00000010) == 0x00000010)) {
3356              to_bitField0_ |= 0x00000010;
3357            }
3358            result.segmentTxnId_ = segmentTxnId_;
3359            result.bitField0_ = to_bitField0_;
3360            onBuilt();
3361            return result;
3362          }
3363    
3364          public Builder mergeFrom(com.google.protobuf.Message other) {
3365            if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto) {
3366              return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto)other);
3367            } else {
3368              super.mergeFrom(other);
3369              return this;
3370            }
3371          }
3372    
3373          public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto other) {
3374            if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto.getDefaultInstance()) return this;
3375            if (other.hasReqInfo()) {
3376              mergeReqInfo(other.getReqInfo());
3377            }
3378            if (other.hasFirstTxnId()) {
3379              setFirstTxnId(other.getFirstTxnId());
3380            }
3381            if (other.hasNumTxns()) {
3382              setNumTxns(other.getNumTxns());
3383            }
3384            if (other.hasRecords()) {
3385              setRecords(other.getRecords());
3386            }
3387            if (other.hasSegmentTxnId()) {
3388              setSegmentTxnId(other.getSegmentTxnId());
3389            }
3390            this.mergeUnknownFields(other.getUnknownFields());
3391            return this;
3392          }
3393    
3394          public final boolean isInitialized() {
3395            if (!hasReqInfo()) {
3396              
3397              return false;
3398            }
3399            if (!hasFirstTxnId()) {
3400              
3401              return false;
3402            }
3403            if (!hasNumTxns()) {
3404              
3405              return false;
3406            }
3407            if (!hasRecords()) {
3408              
3409              return false;
3410            }
3411            if (!hasSegmentTxnId()) {
3412              
3413              return false;
3414            }
3415            if (!getReqInfo().isInitialized()) {
3416              
3417              return false;
3418            }
3419            return true;
3420          }
3421    
3422          public Builder mergeFrom(
3423              com.google.protobuf.CodedInputStream input,
3424              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
3425              throws java.io.IOException {
3426            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto parsedMessage = null;
3427            try {
3428              parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
3429            } catch (com.google.protobuf.InvalidProtocolBufferException e) {
3430              parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto) e.getUnfinishedMessage();
3431              throw e;
3432            } finally {
3433              if (parsedMessage != null) {
3434                mergeFrom(parsedMessage);
3435              }
3436            }
3437            return this;
3438          }
3439          private int bitField0_;
3440    
3441          // required .hadoop.hdfs.RequestInfoProto reqInfo = 1;
3442          private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
3443          private com.google.protobuf.SingleFieldBuilder<
3444              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder> reqInfoBuilder_;
3445          /**
3446           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
3447           */
3448          public boolean hasReqInfo() {
3449            return ((bitField0_ & 0x00000001) == 0x00000001);
3450          }
3451          /**
3452           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
3453           */
3454          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo() {
3455            if (reqInfoBuilder_ == null) {
3456              return reqInfo_;
3457            } else {
3458              return reqInfoBuilder_.getMessage();
3459            }
3460          }
3461          /**
3462           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
3463           */
3464          public Builder setReqInfo(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto value) {
3465            if (reqInfoBuilder_ == null) {
3466              if (value == null) {
3467                throw new NullPointerException();
3468              }
3469              reqInfo_ = value;
3470              onChanged();
3471            } else {
3472              reqInfoBuilder_.setMessage(value);
3473            }
3474            bitField0_ |= 0x00000001;
3475            return this;
3476          }
3477          /**
3478           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
3479           */
3480          public Builder setReqInfo(
3481              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder builderForValue) {
3482            if (reqInfoBuilder_ == null) {
3483              reqInfo_ = builderForValue.build();
3484              onChanged();
3485            } else {
3486              reqInfoBuilder_.setMessage(builderForValue.build());
3487            }
3488            bitField0_ |= 0x00000001;
3489            return this;
3490          }
3491          /**
3492           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
3493           */
3494          public Builder mergeReqInfo(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto value) {
3495            if (reqInfoBuilder_ == null) {
3496              if (((bitField0_ & 0x00000001) == 0x00000001) &&
3497                  reqInfo_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance()) {
3498                reqInfo_ =
3499                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.newBuilder(reqInfo_).mergeFrom(value).buildPartial();
3500              } else {
3501                reqInfo_ = value;
3502              }
3503              onChanged();
3504            } else {
3505              reqInfoBuilder_.mergeFrom(value);
3506            }
3507            bitField0_ |= 0x00000001;
3508            return this;
3509          }
3510          /**
3511           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
3512           */
3513          public Builder clearReqInfo() {
3514            if (reqInfoBuilder_ == null) {
3515              reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
3516              onChanged();
3517            } else {
3518              reqInfoBuilder_.clear();
3519            }
3520            bitField0_ = (bitField0_ & ~0x00000001);
3521            return this;
3522          }
3523          /**
3524           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
3525           */
3526          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder getReqInfoBuilder() {
3527            bitField0_ |= 0x00000001;
3528            onChanged();
3529            return getReqInfoFieldBuilder().getBuilder();
3530          }
3531          /**
3532           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
3533           */
3534          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder() {
3535            if (reqInfoBuilder_ != null) {
3536              return reqInfoBuilder_.getMessageOrBuilder();
3537            } else {
3538              return reqInfo_;
3539            }
3540          }
3541          /**
3542           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
3543           */
3544          private com.google.protobuf.SingleFieldBuilder<
3545              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder> 
3546              getReqInfoFieldBuilder() {
3547            if (reqInfoBuilder_ == null) {
3548              reqInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder<
3549                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder>(
3550                      reqInfo_,
3551                      getParentForChildren(),
3552                      isClean());
3553              reqInfo_ = null;
3554            }
3555            return reqInfoBuilder_;
3556          }
3557    
3558          // required uint64 firstTxnId = 2;
3559          private long firstTxnId_ ;
3560          /**
3561           * <code>required uint64 firstTxnId = 2;</code>
3562           */
3563          public boolean hasFirstTxnId() {
3564            return ((bitField0_ & 0x00000002) == 0x00000002);
3565          }
3566          /**
3567           * <code>required uint64 firstTxnId = 2;</code>
3568           */
3569          public long getFirstTxnId() {
3570            return firstTxnId_;
3571          }
3572          /**
3573           * <code>required uint64 firstTxnId = 2;</code>
3574           */
3575          public Builder setFirstTxnId(long value) {
3576            bitField0_ |= 0x00000002;
3577            firstTxnId_ = value;
3578            onChanged();
3579            return this;
3580          }
3581          /**
3582           * <code>required uint64 firstTxnId = 2;</code>
3583           */
3584          public Builder clearFirstTxnId() {
3585            bitField0_ = (bitField0_ & ~0x00000002);
3586            firstTxnId_ = 0L;
3587            onChanged();
3588            return this;
3589          }
3590    
3591          // required uint32 numTxns = 3;
3592          private int numTxns_ ;
3593          /**
3594           * <code>required uint32 numTxns = 3;</code>
3595           */
3596          public boolean hasNumTxns() {
3597            return ((bitField0_ & 0x00000004) == 0x00000004);
3598          }
3599          /**
3600           * <code>required uint32 numTxns = 3;</code>
3601           */
3602          public int getNumTxns() {
3603            return numTxns_;
3604          }
3605          /**
3606           * <code>required uint32 numTxns = 3;</code>
3607           */
3608          public Builder setNumTxns(int value) {
3609            bitField0_ |= 0x00000004;
3610            numTxns_ = value;
3611            onChanged();
3612            return this;
3613          }
3614          /**
3615           * <code>required uint32 numTxns = 3;</code>
3616           */
3617          public Builder clearNumTxns() {
3618            bitField0_ = (bitField0_ & ~0x00000004);
3619            numTxns_ = 0;
3620            onChanged();
3621            return this;
3622          }
3623    
3624          // required bytes records = 4;
3625          private com.google.protobuf.ByteString records_ = com.google.protobuf.ByteString.EMPTY;
3626          /**
3627           * <code>required bytes records = 4;</code>
3628           */
3629          public boolean hasRecords() {
3630            return ((bitField0_ & 0x00000008) == 0x00000008);
3631          }
3632          /**
3633           * <code>required bytes records = 4;</code>
3634           */
3635          public com.google.protobuf.ByteString getRecords() {
3636            return records_;
3637          }
3638          /**
3639           * <code>required bytes records = 4;</code>
3640           */
3641          public Builder setRecords(com.google.protobuf.ByteString value) {
3642            if (value == null) {
3643        throw new NullPointerException();
3644      }
3645      bitField0_ |= 0x00000008;
3646            records_ = value;
3647            onChanged();
3648            return this;
3649          }
3650          /**
3651           * <code>required bytes records = 4;</code>
3652           */
3653          public Builder clearRecords() {
3654            bitField0_ = (bitField0_ & ~0x00000008);
3655            records_ = getDefaultInstance().getRecords();
3656            onChanged();
3657            return this;
3658          }
3659    
3660          // required uint64 segmentTxnId = 5;
3661          private long segmentTxnId_ ;
3662          /**
3663           * <code>required uint64 segmentTxnId = 5;</code>
3664           */
3665          public boolean hasSegmentTxnId() {
3666            return ((bitField0_ & 0x00000010) == 0x00000010);
3667          }
3668          /**
3669           * <code>required uint64 segmentTxnId = 5;</code>
3670           */
3671          public long getSegmentTxnId() {
3672            return segmentTxnId_;
3673          }
3674          /**
3675           * <code>required uint64 segmentTxnId = 5;</code>
3676           */
3677          public Builder setSegmentTxnId(long value) {
3678            bitField0_ |= 0x00000010;
3679            segmentTxnId_ = value;
3680            onChanged();
3681            return this;
3682          }
3683          /**
3684           * <code>required uint64 segmentTxnId = 5;</code>
3685           */
3686          public Builder clearSegmentTxnId() {
3687            bitField0_ = (bitField0_ & ~0x00000010);
3688            segmentTxnId_ = 0L;
3689            onChanged();
3690            return this;
3691          }
3692    
3693          // @@protoc_insertion_point(builder_scope:hadoop.hdfs.JournalRequestProto)
3694        }
3695    
3696        static {
3697          defaultInstance = new JournalRequestProto(true);
3698          defaultInstance.initFields();
3699        }
3700    
3701        // @@protoc_insertion_point(class_scope:hadoop.hdfs.JournalRequestProto)
3702      }
3703    
3704      public interface JournalResponseProtoOrBuilder
3705          extends com.google.protobuf.MessageOrBuilder {
3706      }
3707      /**
3708       * Protobuf type {@code hadoop.hdfs.JournalResponseProto}
3709       */
3710      public static final class JournalResponseProto extends
3711          com.google.protobuf.GeneratedMessage
3712          implements JournalResponseProtoOrBuilder {
3713        // Use JournalResponseProto.newBuilder() to construct.
3714        private JournalResponseProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
3715          super(builder);
3716          this.unknownFields = builder.getUnknownFields();
3717        }
3718        private JournalResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
3719    
3720        private static final JournalResponseProto defaultInstance;
3721        public static JournalResponseProto getDefaultInstance() {
3722          return defaultInstance;
3723        }
3724    
3725        public JournalResponseProto getDefaultInstanceForType() {
3726          return defaultInstance;
3727        }
3728    
3729        private final com.google.protobuf.UnknownFieldSet unknownFields;
3730        @java.lang.Override
3731        public final com.google.protobuf.UnknownFieldSet
3732            getUnknownFields() {
3733          return this.unknownFields;
3734        }
3735        private JournalResponseProto(
3736            com.google.protobuf.CodedInputStream input,
3737            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
3738            throws com.google.protobuf.InvalidProtocolBufferException {
3739          initFields();
3740          com.google.protobuf.UnknownFieldSet.Builder unknownFields =
3741              com.google.protobuf.UnknownFieldSet.newBuilder();
3742          try {
3743            boolean done = false;
3744            while (!done) {
3745              int tag = input.readTag();
3746              switch (tag) {
3747                case 0:
3748                  done = true;
3749                  break;
3750                default: {
3751                  if (!parseUnknownField(input, unknownFields,
3752                                         extensionRegistry, tag)) {
3753                    done = true;
3754                  }
3755                  break;
3756                }
3757              }
3758            }
3759          } catch (com.google.protobuf.InvalidProtocolBufferException e) {
3760            throw e.setUnfinishedMessage(this);
3761          } catch (java.io.IOException e) {
3762            throw new com.google.protobuf.InvalidProtocolBufferException(
3763                e.getMessage()).setUnfinishedMessage(this);
3764          } finally {
3765            this.unknownFields = unknownFields.build();
3766            makeExtensionsImmutable();
3767          }
3768        }
3769        public static final com.google.protobuf.Descriptors.Descriptor
3770            getDescriptor() {
3771          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_JournalResponseProto_descriptor;
3772        }
3773    
3774        protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
3775            internalGetFieldAccessorTable() {
3776          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_JournalResponseProto_fieldAccessorTable
3777              .ensureFieldAccessorsInitialized(
3778                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto.Builder.class);
3779        }
3780    
3781        public static com.google.protobuf.Parser<JournalResponseProto> PARSER =
3782            new com.google.protobuf.AbstractParser<JournalResponseProto>() {
3783          public JournalResponseProto parsePartialFrom(
3784              com.google.protobuf.CodedInputStream input,
3785              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
3786              throws com.google.protobuf.InvalidProtocolBufferException {
3787            return new JournalResponseProto(input, extensionRegistry);
3788          }
3789        };
3790    
3791        @java.lang.Override
3792        public com.google.protobuf.Parser<JournalResponseProto> getParserForType() {
3793          return PARSER;
3794        }
3795    
3796        private void initFields() {
3797        }
3798        private byte memoizedIsInitialized = -1;
3799        public final boolean isInitialized() {
3800          byte isInitialized = memoizedIsInitialized;
3801          if (isInitialized != -1) return isInitialized == 1;
3802    
3803          memoizedIsInitialized = 1;
3804          return true;
3805        }
3806    
3807        public void writeTo(com.google.protobuf.CodedOutputStream output)
3808                            throws java.io.IOException {
3809          getSerializedSize();
3810          getUnknownFields().writeTo(output);
3811        }
3812    
3813        private int memoizedSerializedSize = -1;
3814        public int getSerializedSize() {
3815          int size = memoizedSerializedSize;
3816          if (size != -1) return size;
3817    
3818          size = 0;
3819          size += getUnknownFields().getSerializedSize();
3820          memoizedSerializedSize = size;
3821          return size;
3822        }
3823    
3824        private static final long serialVersionUID = 0L;
3825        @java.lang.Override
3826        protected java.lang.Object writeReplace()
3827            throws java.io.ObjectStreamException {
3828          return super.writeReplace();
3829        }
3830    
3831        @java.lang.Override
3832        public boolean equals(final java.lang.Object obj) {
3833          if (obj == this) {
3834           return true;
3835          }
3836          if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto)) {
3837            return super.equals(obj);
3838          }
3839          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto) obj;
3840    
3841          boolean result = true;
3842          result = result &&
3843              getUnknownFields().equals(other.getUnknownFields());
3844          return result;
3845        }
3846    
3847        private int memoizedHashCode = 0;
3848        @java.lang.Override
3849        public int hashCode() {
3850          if (memoizedHashCode != 0) {
3851            return memoizedHashCode;
3852          }
3853          int hash = 41;
3854          hash = (19 * hash) + getDescriptorForType().hashCode();
3855          hash = (29 * hash) + getUnknownFields().hashCode();
3856          memoizedHashCode = hash;
3857          return hash;
3858        }
3859    
3860        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto parseFrom(
3861            com.google.protobuf.ByteString data)
3862            throws com.google.protobuf.InvalidProtocolBufferException {
3863          return PARSER.parseFrom(data);
3864        }
3865        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto parseFrom(
3866            com.google.protobuf.ByteString data,
3867            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
3868            throws com.google.protobuf.InvalidProtocolBufferException {
3869          return PARSER.parseFrom(data, extensionRegistry);
3870        }
3871        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto parseFrom(byte[] data)
3872            throws com.google.protobuf.InvalidProtocolBufferException {
3873          return PARSER.parseFrom(data);
3874        }
3875        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto parseFrom(
3876            byte[] data,
3877            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
3878            throws com.google.protobuf.InvalidProtocolBufferException {
3879          return PARSER.parseFrom(data, extensionRegistry);
3880        }
3881        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto parseFrom(java.io.InputStream input)
3882            throws java.io.IOException {
3883          return PARSER.parseFrom(input);
3884        }
3885        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto parseFrom(
3886            java.io.InputStream input,
3887            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
3888            throws java.io.IOException {
3889          return PARSER.parseFrom(input, extensionRegistry);
3890        }
3891        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto parseDelimitedFrom(java.io.InputStream input)
3892            throws java.io.IOException {
3893          return PARSER.parseDelimitedFrom(input);
3894        }
3895        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto parseDelimitedFrom(
3896            java.io.InputStream input,
3897            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
3898            throws java.io.IOException {
3899          return PARSER.parseDelimitedFrom(input, extensionRegistry);
3900        }
3901        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto parseFrom(
3902            com.google.protobuf.CodedInputStream input)
3903            throws java.io.IOException {
3904          return PARSER.parseFrom(input);
3905        }
3906        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto parseFrom(
3907            com.google.protobuf.CodedInputStream input,
3908            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
3909            throws java.io.IOException {
3910          return PARSER.parseFrom(input, extensionRegistry);
3911        }
3912    
3913        public static Builder newBuilder() { return Builder.create(); }
3914        public Builder newBuilderForType() { return newBuilder(); }
3915        public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto prototype) {
3916          return newBuilder().mergeFrom(prototype);
3917        }
3918        public Builder toBuilder() { return newBuilder(this); }
3919    
3920        @java.lang.Override
3921        protected Builder newBuilderForType(
3922            com.google.protobuf.GeneratedMessage.BuilderParent parent) {
3923          Builder builder = new Builder(parent);
3924          return builder;
3925        }
3926        /**
3927         * Protobuf type {@code hadoop.hdfs.JournalResponseProto}
3928         */
3929        public static final class Builder extends
3930            com.google.protobuf.GeneratedMessage.Builder<Builder>
3931           implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProtoOrBuilder {
3932          public static final com.google.protobuf.Descriptors.Descriptor
3933              getDescriptor() {
3934            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_JournalResponseProto_descriptor;
3935          }
3936    
3937          protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
3938              internalGetFieldAccessorTable() {
3939            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_JournalResponseProto_fieldAccessorTable
3940                .ensureFieldAccessorsInitialized(
3941                    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto.Builder.class);
3942          }
3943    
3944          // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto.newBuilder()
3945          private Builder() {
3946            maybeForceBuilderInitialization();
3947          }
3948    
3949          private Builder(
3950              com.google.protobuf.GeneratedMessage.BuilderParent parent) {
3951            super(parent);
3952            maybeForceBuilderInitialization();
3953          }
3954          private void maybeForceBuilderInitialization() {
3955            if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
3956            }
3957          }
3958          private static Builder create() {
3959            return new Builder();
3960          }
3961    
3962          public Builder clear() {
3963            super.clear();
3964            return this;
3965          }
3966    
3967          public Builder clone() {
3968            return create().mergeFrom(buildPartial());
3969          }
3970    
3971          public com.google.protobuf.Descriptors.Descriptor
3972              getDescriptorForType() {
3973            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_JournalResponseProto_descriptor;
3974          }
3975    
3976          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto getDefaultInstanceForType() {
3977            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto.getDefaultInstance();
3978          }
3979    
3980          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto build() {
3981            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto result = buildPartial();
3982            if (!result.isInitialized()) {
3983              throw newUninitializedMessageException(result);
3984            }
3985            return result;
3986          }
3987    
3988          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto buildPartial() {
3989            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto(this);
3990            onBuilt();
3991            return result;
3992          }
3993    
3994          public Builder mergeFrom(com.google.protobuf.Message other) {
3995            if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto) {
3996              return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto)other);
3997            } else {
3998              super.mergeFrom(other);
3999              return this;
4000            }
4001          }
4002    
4003          public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto other) {
4004            if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto.getDefaultInstance()) return this;
4005            this.mergeUnknownFields(other.getUnknownFields());
4006            return this;
4007          }
4008    
4009          public final boolean isInitialized() {
4010            return true;
4011          }
4012    
4013          public Builder mergeFrom(
4014              com.google.protobuf.CodedInputStream input,
4015              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
4016              throws java.io.IOException {
4017            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto parsedMessage = null;
4018            try {
4019              parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
4020            } catch (com.google.protobuf.InvalidProtocolBufferException e) {
4021              parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto) e.getUnfinishedMessage();
4022              throw e;
4023            } finally {
4024              if (parsedMessage != null) {
4025                mergeFrom(parsedMessage);
4026              }
4027            }
4028            return this;
4029          }
4030    
4031          // @@protoc_insertion_point(builder_scope:hadoop.hdfs.JournalResponseProto)
4032        }
4033    
4034        static {
4035          defaultInstance = new JournalResponseProto(true);
4036          defaultInstance.initFields();
4037        }
4038    
4039        // @@protoc_insertion_point(class_scope:hadoop.hdfs.JournalResponseProto)
4040      }
4041    
4042      public interface HeartbeatRequestProtoOrBuilder
4043          extends com.google.protobuf.MessageOrBuilder {
4044    
4045        // required .hadoop.hdfs.RequestInfoProto reqInfo = 1;
4046        /**
4047         * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
4048         */
4049        boolean hasReqInfo();
4050        /**
4051         * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
4052         */
4053        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo();
4054        /**
4055         * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
4056         */
4057        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder();
4058      }
4059      /**
4060       * Protobuf type {@code hadoop.hdfs.HeartbeatRequestProto}
4061       */
4062      public static final class HeartbeatRequestProto extends
4063          com.google.protobuf.GeneratedMessage
4064          implements HeartbeatRequestProtoOrBuilder {
4065        // Use HeartbeatRequestProto.newBuilder() to construct.
4066        private HeartbeatRequestProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
4067          super(builder);
4068          this.unknownFields = builder.getUnknownFields();
4069        }
4070        private HeartbeatRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
4071    
4072        private static final HeartbeatRequestProto defaultInstance;
4073        public static HeartbeatRequestProto getDefaultInstance() {
4074          return defaultInstance;
4075        }
4076    
4077        public HeartbeatRequestProto getDefaultInstanceForType() {
4078          return defaultInstance;
4079        }
4080    
4081        private final com.google.protobuf.UnknownFieldSet unknownFields;
4082        @java.lang.Override
4083        public final com.google.protobuf.UnknownFieldSet
4084            getUnknownFields() {
4085          return this.unknownFields;
4086        }
4087        private HeartbeatRequestProto(
4088            com.google.protobuf.CodedInputStream input,
4089            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
4090            throws com.google.protobuf.InvalidProtocolBufferException {
4091          initFields();
4092          int mutable_bitField0_ = 0;
4093          com.google.protobuf.UnknownFieldSet.Builder unknownFields =
4094              com.google.protobuf.UnknownFieldSet.newBuilder();
4095          try {
4096            boolean done = false;
4097            while (!done) {
4098              int tag = input.readTag();
4099              switch (tag) {
4100                case 0:
4101                  done = true;
4102                  break;
4103                default: {
4104                  if (!parseUnknownField(input, unknownFields,
4105                                         extensionRegistry, tag)) {
4106                    done = true;
4107                  }
4108                  break;
4109                }
4110                case 10: {
4111                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder subBuilder = null;
4112                  if (((bitField0_ & 0x00000001) == 0x00000001)) {
4113                    subBuilder = reqInfo_.toBuilder();
4114                  }
4115                  reqInfo_ = input.readMessage(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.PARSER, extensionRegistry);
4116                  if (subBuilder != null) {
4117                    subBuilder.mergeFrom(reqInfo_);
4118                    reqInfo_ = subBuilder.buildPartial();
4119                  }
4120                  bitField0_ |= 0x00000001;
4121                  break;
4122                }
4123              }
4124            }
4125          } catch (com.google.protobuf.InvalidProtocolBufferException e) {
4126            throw e.setUnfinishedMessage(this);
4127          } catch (java.io.IOException e) {
4128            throw new com.google.protobuf.InvalidProtocolBufferException(
4129                e.getMessage()).setUnfinishedMessage(this);
4130          } finally {
4131            this.unknownFields = unknownFields.build();
4132            makeExtensionsImmutable();
4133          }
4134        }
4135        public static final com.google.protobuf.Descriptors.Descriptor
4136            getDescriptor() {
4137          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_HeartbeatRequestProto_descriptor;
4138        }
4139    
4140        protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
4141            internalGetFieldAccessorTable() {
4142          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_HeartbeatRequestProto_fieldAccessorTable
4143              .ensureFieldAccessorsInitialized(
4144                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto.Builder.class);
4145        }
4146    
4147        public static com.google.protobuf.Parser<HeartbeatRequestProto> PARSER =
4148            new com.google.protobuf.AbstractParser<HeartbeatRequestProto>() {
4149          public HeartbeatRequestProto parsePartialFrom(
4150              com.google.protobuf.CodedInputStream input,
4151              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
4152              throws com.google.protobuf.InvalidProtocolBufferException {
4153            return new HeartbeatRequestProto(input, extensionRegistry);
4154          }
4155        };
4156    
4157        @java.lang.Override
4158        public com.google.protobuf.Parser<HeartbeatRequestProto> getParserForType() {
4159          return PARSER;
4160        }
4161    
4162        private int bitField0_;
4163        // required .hadoop.hdfs.RequestInfoProto reqInfo = 1;
4164        public static final int REQINFO_FIELD_NUMBER = 1;
4165        private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto reqInfo_;
4166        /**
4167         * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
4168         */
4169        public boolean hasReqInfo() {
4170          return ((bitField0_ & 0x00000001) == 0x00000001);
4171        }
4172        /**
4173         * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
4174         */
4175        public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo() {
4176          return reqInfo_;
4177        }
4178        /**
4179         * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
4180         */
4181        public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder() {
4182          return reqInfo_;
4183        }
4184    
4185        private void initFields() {
4186          reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
4187        }
4188        private byte memoizedIsInitialized = -1;
4189        public final boolean isInitialized() {
4190          byte isInitialized = memoizedIsInitialized;
4191          if (isInitialized != -1) return isInitialized == 1;
4192    
4193          if (!hasReqInfo()) {
4194            memoizedIsInitialized = 0;
4195            return false;
4196          }
4197          if (!getReqInfo().isInitialized()) {
4198            memoizedIsInitialized = 0;
4199            return false;
4200          }
4201          memoizedIsInitialized = 1;
4202          return true;
4203        }
4204    
4205        public void writeTo(com.google.protobuf.CodedOutputStream output)
4206                            throws java.io.IOException {
4207          getSerializedSize();
4208          if (((bitField0_ & 0x00000001) == 0x00000001)) {
4209            output.writeMessage(1, reqInfo_);
4210          }
4211          getUnknownFields().writeTo(output);
4212        }
4213    
4214        private int memoizedSerializedSize = -1;
4215        public int getSerializedSize() {
4216          int size = memoizedSerializedSize;
4217          if (size != -1) return size;
4218    
4219          size = 0;
4220          if (((bitField0_ & 0x00000001) == 0x00000001)) {
4221            size += com.google.protobuf.CodedOutputStream
4222              .computeMessageSize(1, reqInfo_);
4223          }
4224          size += getUnknownFields().getSerializedSize();
4225          memoizedSerializedSize = size;
4226          return size;
4227        }
4228    
4229        private static final long serialVersionUID = 0L;
4230        @java.lang.Override
4231        protected java.lang.Object writeReplace()
4232            throws java.io.ObjectStreamException {
4233          return super.writeReplace();
4234        }
4235    
4236        @java.lang.Override
4237        public boolean equals(final java.lang.Object obj) {
4238          if (obj == this) {
4239           return true;
4240          }
4241          if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto)) {
4242            return super.equals(obj);
4243          }
4244          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto) obj;
4245    
4246          boolean result = true;
4247          result = result && (hasReqInfo() == other.hasReqInfo());
4248          if (hasReqInfo()) {
4249            result = result && getReqInfo()
4250                .equals(other.getReqInfo());
4251          }
4252          result = result &&
4253              getUnknownFields().equals(other.getUnknownFields());
4254          return result;
4255        }
4256    
4257        private int memoizedHashCode = 0;
4258        @java.lang.Override
4259        public int hashCode() {
4260          if (memoizedHashCode != 0) {
4261            return memoizedHashCode;
4262          }
4263          int hash = 41;
4264          hash = (19 * hash) + getDescriptorForType().hashCode();
4265          if (hasReqInfo()) {
4266            hash = (37 * hash) + REQINFO_FIELD_NUMBER;
4267            hash = (53 * hash) + getReqInfo().hashCode();
4268          }
4269          hash = (29 * hash) + getUnknownFields().hashCode();
4270          memoizedHashCode = hash;
4271          return hash;
4272        }
4273    
4274        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto parseFrom(
4275            com.google.protobuf.ByteString data)
4276            throws com.google.protobuf.InvalidProtocolBufferException {
4277          return PARSER.parseFrom(data);
4278        }
4279        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto parseFrom(
4280            com.google.protobuf.ByteString data,
4281            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
4282            throws com.google.protobuf.InvalidProtocolBufferException {
4283          return PARSER.parseFrom(data, extensionRegistry);
4284        }
4285        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto parseFrom(byte[] data)
4286            throws com.google.protobuf.InvalidProtocolBufferException {
4287          return PARSER.parseFrom(data);
4288        }
4289        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto parseFrom(
4290            byte[] data,
4291            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
4292            throws com.google.protobuf.InvalidProtocolBufferException {
4293          return PARSER.parseFrom(data, extensionRegistry);
4294        }
4295        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto parseFrom(java.io.InputStream input)
4296            throws java.io.IOException {
4297          return PARSER.parseFrom(input);
4298        }
4299        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto parseFrom(
4300            java.io.InputStream input,
4301            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
4302            throws java.io.IOException {
4303          return PARSER.parseFrom(input, extensionRegistry);
4304        }
4305        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto parseDelimitedFrom(java.io.InputStream input)
4306            throws java.io.IOException {
4307          return PARSER.parseDelimitedFrom(input);
4308        }
4309        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto parseDelimitedFrom(
4310            java.io.InputStream input,
4311            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
4312            throws java.io.IOException {
4313          return PARSER.parseDelimitedFrom(input, extensionRegistry);
4314        }
4315        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto parseFrom(
4316            com.google.protobuf.CodedInputStream input)
4317            throws java.io.IOException {
4318          return PARSER.parseFrom(input);
4319        }
4320        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto parseFrom(
4321            com.google.protobuf.CodedInputStream input,
4322            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
4323            throws java.io.IOException {
4324          return PARSER.parseFrom(input, extensionRegistry);
4325        }
4326    
4327        public static Builder newBuilder() { return Builder.create(); }
4328        public Builder newBuilderForType() { return newBuilder(); }
4329        public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto prototype) {
4330          return newBuilder().mergeFrom(prototype);
4331        }
4332        public Builder toBuilder() { return newBuilder(this); }
4333    
4334        @java.lang.Override
4335        protected Builder newBuilderForType(
4336            com.google.protobuf.GeneratedMessage.BuilderParent parent) {
4337          Builder builder = new Builder(parent);
4338          return builder;
4339        }
4340        /**
4341         * Protobuf type {@code hadoop.hdfs.HeartbeatRequestProto}
4342         */
4343        public static final class Builder extends
4344            com.google.protobuf.GeneratedMessage.Builder<Builder>
4345           implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProtoOrBuilder {
4346          public static final com.google.protobuf.Descriptors.Descriptor
4347              getDescriptor() {
4348            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_HeartbeatRequestProto_descriptor;
4349          }
4350    
4351          protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
4352              internalGetFieldAccessorTable() {
4353            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_HeartbeatRequestProto_fieldAccessorTable
4354                .ensureFieldAccessorsInitialized(
4355                    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto.Builder.class);
4356          }
4357    
4358          // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto.newBuilder()
4359          private Builder() {
4360            maybeForceBuilderInitialization();
4361          }
4362    
4363          private Builder(
4364              com.google.protobuf.GeneratedMessage.BuilderParent parent) {
4365            super(parent);
4366            maybeForceBuilderInitialization();
4367          }
4368          private void maybeForceBuilderInitialization() {
4369            if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
4370              getReqInfoFieldBuilder();
4371            }
4372          }
4373          private static Builder create() {
4374            return new Builder();
4375          }
4376    
4377          public Builder clear() {
4378            super.clear();
4379            if (reqInfoBuilder_ == null) {
4380              reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
4381            } else {
4382              reqInfoBuilder_.clear();
4383            }
4384            bitField0_ = (bitField0_ & ~0x00000001);
4385            return this;
4386          }
4387    
4388          public Builder clone() {
4389            return create().mergeFrom(buildPartial());
4390          }
4391    
4392          public com.google.protobuf.Descriptors.Descriptor
4393              getDescriptorForType() {
4394            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_HeartbeatRequestProto_descriptor;
4395          }
4396    
4397          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto getDefaultInstanceForType() {
4398            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto.getDefaultInstance();
4399          }
4400    
4401          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto build() {
4402            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto result = buildPartial();
4403            if (!result.isInitialized()) {
4404              throw newUninitializedMessageException(result);
4405            }
4406            return result;
4407          }
4408    
4409          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto buildPartial() {
4410            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto(this);
4411            int from_bitField0_ = bitField0_;
4412            int to_bitField0_ = 0;
4413            if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
4414              to_bitField0_ |= 0x00000001;
4415            }
4416            if (reqInfoBuilder_ == null) {
4417              result.reqInfo_ = reqInfo_;
4418            } else {
4419              result.reqInfo_ = reqInfoBuilder_.build();
4420            }
4421            result.bitField0_ = to_bitField0_;
4422            onBuilt();
4423            return result;
4424          }
4425    
4426          public Builder mergeFrom(com.google.protobuf.Message other) {
4427            if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto) {
4428              return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto)other);
4429            } else {
4430              super.mergeFrom(other);
4431              return this;
4432            }
4433          }
4434    
4435          public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto other) {
4436            if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto.getDefaultInstance()) return this;
4437            if (other.hasReqInfo()) {
4438              mergeReqInfo(other.getReqInfo());
4439            }
4440            this.mergeUnknownFields(other.getUnknownFields());
4441            return this;
4442          }
4443    
4444          public final boolean isInitialized() {
4445            if (!hasReqInfo()) {
4446              
4447              return false;
4448            }
4449            if (!getReqInfo().isInitialized()) {
4450              
4451              return false;
4452            }
4453            return true;
4454          }
4455    
4456          public Builder mergeFrom(
4457              com.google.protobuf.CodedInputStream input,
4458              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
4459              throws java.io.IOException {
4460            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto parsedMessage = null;
4461            try {
4462              parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
4463            } catch (com.google.protobuf.InvalidProtocolBufferException e) {
4464              parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto) e.getUnfinishedMessage();
4465              throw e;
4466            } finally {
4467              if (parsedMessage != null) {
4468                mergeFrom(parsedMessage);
4469              }
4470            }
4471            return this;
4472          }
4473          private int bitField0_;
4474    
4475          // required .hadoop.hdfs.RequestInfoProto reqInfo = 1;
4476          private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
4477          private com.google.protobuf.SingleFieldBuilder<
4478              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder> reqInfoBuilder_;
4479          /**
4480           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
4481           */
4482          public boolean hasReqInfo() {
4483            return ((bitField0_ & 0x00000001) == 0x00000001);
4484          }
4485          /**
4486           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
4487           */
4488          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo() {
4489            if (reqInfoBuilder_ == null) {
4490              return reqInfo_;
4491            } else {
4492              return reqInfoBuilder_.getMessage();
4493            }
4494          }
4495          /**
4496           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
4497           */
4498          public Builder setReqInfo(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto value) {
4499            if (reqInfoBuilder_ == null) {
4500              if (value == null) {
4501                throw new NullPointerException();
4502              }
4503              reqInfo_ = value;
4504              onChanged();
4505            } else {
4506              reqInfoBuilder_.setMessage(value);
4507            }
4508            bitField0_ |= 0x00000001;
4509            return this;
4510          }
4511          /**
4512           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
4513           */
4514          public Builder setReqInfo(
4515              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder builderForValue) {
4516            if (reqInfoBuilder_ == null) {
4517              reqInfo_ = builderForValue.build();
4518              onChanged();
4519            } else {
4520              reqInfoBuilder_.setMessage(builderForValue.build());
4521            }
4522            bitField0_ |= 0x00000001;
4523            return this;
4524          }
4525          /**
4526           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
4527           */
4528          public Builder mergeReqInfo(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto value) {
4529            if (reqInfoBuilder_ == null) {
4530              if (((bitField0_ & 0x00000001) == 0x00000001) &&
4531                  reqInfo_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance()) {
4532                reqInfo_ =
4533                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.newBuilder(reqInfo_).mergeFrom(value).buildPartial();
4534              } else {
4535                reqInfo_ = value;
4536              }
4537              onChanged();
4538            } else {
4539              reqInfoBuilder_.mergeFrom(value);
4540            }
4541            bitField0_ |= 0x00000001;
4542            return this;
4543          }
4544          /**
4545           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
4546           */
4547          public Builder clearReqInfo() {
4548            if (reqInfoBuilder_ == null) {
4549              reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
4550              onChanged();
4551            } else {
4552              reqInfoBuilder_.clear();
4553            }
4554            bitField0_ = (bitField0_ & ~0x00000001);
4555            return this;
4556          }
4557          /**
4558           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
4559           */
4560          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder getReqInfoBuilder() {
4561            bitField0_ |= 0x00000001;
4562            onChanged();
4563            return getReqInfoFieldBuilder().getBuilder();
4564          }
4565          /**
4566           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
4567           */
4568          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder() {
4569            if (reqInfoBuilder_ != null) {
4570              return reqInfoBuilder_.getMessageOrBuilder();
4571            } else {
4572              return reqInfo_;
4573            }
4574          }
4575          /**
4576           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
4577           */
4578          private com.google.protobuf.SingleFieldBuilder<
4579              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder> 
4580              getReqInfoFieldBuilder() {
4581            if (reqInfoBuilder_ == null) {
4582              reqInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder<
4583                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder>(
4584                      reqInfo_,
4585                      getParentForChildren(),
4586                      isClean());
4587              reqInfo_ = null;
4588            }
4589            return reqInfoBuilder_;
4590          }
4591    
4592          // @@protoc_insertion_point(builder_scope:hadoop.hdfs.HeartbeatRequestProto)
4593        }
4594    
4595        static {
4596          defaultInstance = new HeartbeatRequestProto(true);
4597          defaultInstance.initFields();
4598        }
4599    
4600        // @@protoc_insertion_point(class_scope:hadoop.hdfs.HeartbeatRequestProto)
4601      }
4602    
4603      public interface HeartbeatResponseProtoOrBuilder
4604          extends com.google.protobuf.MessageOrBuilder {
4605      }
4606      /**
4607       * Protobuf type {@code hadoop.hdfs.HeartbeatResponseProto}
4608       *
4609       * <pre>
4610       * void response
4611       * </pre>
4612       */
4613      public static final class HeartbeatResponseProto extends
4614          com.google.protobuf.GeneratedMessage
4615          implements HeartbeatResponseProtoOrBuilder {
4616        // Use HeartbeatResponseProto.newBuilder() to construct.
4617        private HeartbeatResponseProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
4618          super(builder);
4619          this.unknownFields = builder.getUnknownFields();
4620        }
4621        private HeartbeatResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
4622    
4623        private static final HeartbeatResponseProto defaultInstance;
4624        public static HeartbeatResponseProto getDefaultInstance() {
4625          return defaultInstance;
4626        }
4627    
4628        public HeartbeatResponseProto getDefaultInstanceForType() {
4629          return defaultInstance;
4630        }
4631    
4632        private final com.google.protobuf.UnknownFieldSet unknownFields;
4633        @java.lang.Override
4634        public final com.google.protobuf.UnknownFieldSet
4635            getUnknownFields() {
4636          return this.unknownFields;
4637        }
4638        private HeartbeatResponseProto(
4639            com.google.protobuf.CodedInputStream input,
4640            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
4641            throws com.google.protobuf.InvalidProtocolBufferException {
4642          initFields();
4643          com.google.protobuf.UnknownFieldSet.Builder unknownFields =
4644              com.google.protobuf.UnknownFieldSet.newBuilder();
4645          try {
4646            boolean done = false;
4647            while (!done) {
4648              int tag = input.readTag();
4649              switch (tag) {
4650                case 0:
4651                  done = true;
4652                  break;
4653                default: {
4654                  if (!parseUnknownField(input, unknownFields,
4655                                         extensionRegistry, tag)) {
4656                    done = true;
4657                  }
4658                  break;
4659                }
4660              }
4661            }
4662          } catch (com.google.protobuf.InvalidProtocolBufferException e) {
4663            throw e.setUnfinishedMessage(this);
4664          } catch (java.io.IOException e) {
4665            throw new com.google.protobuf.InvalidProtocolBufferException(
4666                e.getMessage()).setUnfinishedMessage(this);
4667          } finally {
4668            this.unknownFields = unknownFields.build();
4669            makeExtensionsImmutable();
4670          }
4671        }
4672        public static final com.google.protobuf.Descriptors.Descriptor
4673            getDescriptor() {
4674          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_HeartbeatResponseProto_descriptor;
4675        }
4676    
4677        protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
4678            internalGetFieldAccessorTable() {
4679          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_HeartbeatResponseProto_fieldAccessorTable
4680              .ensureFieldAccessorsInitialized(
4681                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto.Builder.class);
4682        }
4683    
4684        public static com.google.protobuf.Parser<HeartbeatResponseProto> PARSER =
4685            new com.google.protobuf.AbstractParser<HeartbeatResponseProto>() {
4686          public HeartbeatResponseProto parsePartialFrom(
4687              com.google.protobuf.CodedInputStream input,
4688              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
4689              throws com.google.protobuf.InvalidProtocolBufferException {
4690            return new HeartbeatResponseProto(input, extensionRegistry);
4691          }
4692        };
4693    
4694        @java.lang.Override
4695        public com.google.protobuf.Parser<HeartbeatResponseProto> getParserForType() {
4696          return PARSER;
4697        }
4698    
4699        private void initFields() {
4700        }
4701        private byte memoizedIsInitialized = -1;
4702        public final boolean isInitialized() {
4703          byte isInitialized = memoizedIsInitialized;
4704          if (isInitialized != -1) return isInitialized == 1;
4705    
4706          memoizedIsInitialized = 1;
4707          return true;
4708        }
4709    
4710        public void writeTo(com.google.protobuf.CodedOutputStream output)
4711                            throws java.io.IOException {
4712          getSerializedSize();
4713          getUnknownFields().writeTo(output);
4714        }
4715    
4716        private int memoizedSerializedSize = -1;
4717        public int getSerializedSize() {
4718          int size = memoizedSerializedSize;
4719          if (size != -1) return size;
4720    
4721          size = 0;
4722          size += getUnknownFields().getSerializedSize();
4723          memoizedSerializedSize = size;
4724          return size;
4725        }
4726    
4727        private static final long serialVersionUID = 0L;
4728        @java.lang.Override
4729        protected java.lang.Object writeReplace()
4730            throws java.io.ObjectStreamException {
4731          return super.writeReplace();
4732        }
4733    
4734        @java.lang.Override
4735        public boolean equals(final java.lang.Object obj) {
4736          if (obj == this) {
4737           return true;
4738          }
4739          if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto)) {
4740            return super.equals(obj);
4741          }
4742          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto) obj;
4743    
4744          boolean result = true;
4745          result = result &&
4746              getUnknownFields().equals(other.getUnknownFields());
4747          return result;
4748        }
4749    
4750        private int memoizedHashCode = 0;
4751        @java.lang.Override
4752        public int hashCode() {
4753          if (memoizedHashCode != 0) {
4754            return memoizedHashCode;
4755          }
4756          int hash = 41;
4757          hash = (19 * hash) + getDescriptorForType().hashCode();
4758          hash = (29 * hash) + getUnknownFields().hashCode();
4759          memoizedHashCode = hash;
4760          return hash;
4761        }
4762    
4763        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto parseFrom(
4764            com.google.protobuf.ByteString data)
4765            throws com.google.protobuf.InvalidProtocolBufferException {
4766          return PARSER.parseFrom(data);
4767        }
4768        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto parseFrom(
4769            com.google.protobuf.ByteString data,
4770            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
4771            throws com.google.protobuf.InvalidProtocolBufferException {
4772          return PARSER.parseFrom(data, extensionRegistry);
4773        }
4774        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto parseFrom(byte[] data)
4775            throws com.google.protobuf.InvalidProtocolBufferException {
4776          return PARSER.parseFrom(data);
4777        }
4778        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto parseFrom(
4779            byte[] data,
4780            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
4781            throws com.google.protobuf.InvalidProtocolBufferException {
4782          return PARSER.parseFrom(data, extensionRegistry);
4783        }
4784        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto parseFrom(java.io.InputStream input)
4785            throws java.io.IOException {
4786          return PARSER.parseFrom(input);
4787        }
4788        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto parseFrom(
4789            java.io.InputStream input,
4790            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
4791            throws java.io.IOException {
4792          return PARSER.parseFrom(input, extensionRegistry);
4793        }
4794        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto parseDelimitedFrom(java.io.InputStream input)
4795            throws java.io.IOException {
4796          return PARSER.parseDelimitedFrom(input);
4797        }
4798        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto parseDelimitedFrom(
4799            java.io.InputStream input,
4800            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
4801            throws java.io.IOException {
4802          return PARSER.parseDelimitedFrom(input, extensionRegistry);
4803        }
4804        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto parseFrom(
4805            com.google.protobuf.CodedInputStream input)
4806            throws java.io.IOException {
4807          return PARSER.parseFrom(input);
4808        }
4809        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto parseFrom(
4810            com.google.protobuf.CodedInputStream input,
4811            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
4812            throws java.io.IOException {
4813          return PARSER.parseFrom(input, extensionRegistry);
4814        }
4815    
4816        public static Builder newBuilder() { return Builder.create(); }
4817        public Builder newBuilderForType() { return newBuilder(); }
4818        public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto prototype) {
4819          return newBuilder().mergeFrom(prototype);
4820        }
4821        public Builder toBuilder() { return newBuilder(this); }
4822    
4823        @java.lang.Override
4824        protected Builder newBuilderForType(
4825            com.google.protobuf.GeneratedMessage.BuilderParent parent) {
4826          Builder builder = new Builder(parent);
4827          return builder;
4828        }
4829        /**
4830         * Protobuf type {@code hadoop.hdfs.HeartbeatResponseProto}
4831         *
4832         * <pre>
4833         * void response
4834         * </pre>
4835         */
4836        public static final class Builder extends
4837            com.google.protobuf.GeneratedMessage.Builder<Builder>
4838           implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProtoOrBuilder {
4839          public static final com.google.protobuf.Descriptors.Descriptor
4840              getDescriptor() {
4841            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_HeartbeatResponseProto_descriptor;
4842          }
4843    
4844          protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
4845              internalGetFieldAccessorTable() {
4846            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_HeartbeatResponseProto_fieldAccessorTable
4847                .ensureFieldAccessorsInitialized(
4848                    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto.Builder.class);
4849          }
4850    
4851          // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto.newBuilder()
4852          private Builder() {
4853            maybeForceBuilderInitialization();
4854          }
4855    
4856          private Builder(
4857              com.google.protobuf.GeneratedMessage.BuilderParent parent) {
4858            super(parent);
4859            maybeForceBuilderInitialization();
4860          }
4861          private void maybeForceBuilderInitialization() {
4862            if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
4863            }
4864          }
4865          private static Builder create() {
4866            return new Builder();
4867          }
4868    
4869          public Builder clear() {
4870            super.clear();
4871            return this;
4872          }
4873    
4874          public Builder clone() {
4875            return create().mergeFrom(buildPartial());
4876          }
4877    
4878          public com.google.protobuf.Descriptors.Descriptor
4879              getDescriptorForType() {
4880            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_HeartbeatResponseProto_descriptor;
4881          }
4882    
4883          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto getDefaultInstanceForType() {
4884            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto.getDefaultInstance();
4885          }
4886    
4887          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto build() {
4888            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto result = buildPartial();
4889            if (!result.isInitialized()) {
4890              throw newUninitializedMessageException(result);
4891            }
4892            return result;
4893          }
4894    
4895          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto buildPartial() {
4896            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto(this);
4897            onBuilt();
4898            return result;
4899          }
4900    
4901          public Builder mergeFrom(com.google.protobuf.Message other) {
4902            if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto) {
4903              return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto)other);
4904            } else {
4905              super.mergeFrom(other);
4906              return this;
4907            }
4908          }
4909    
4910          public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto other) {
4911            if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto.getDefaultInstance()) return this;
4912            this.mergeUnknownFields(other.getUnknownFields());
4913            return this;
4914          }
4915    
4916          public final boolean isInitialized() {
4917            return true;
4918          }
4919    
4920          public Builder mergeFrom(
4921              com.google.protobuf.CodedInputStream input,
4922              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
4923              throws java.io.IOException {
4924            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto parsedMessage = null;
4925            try {
4926              parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
4927            } catch (com.google.protobuf.InvalidProtocolBufferException e) {
4928              parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto) e.getUnfinishedMessage();
4929              throw e;
4930            } finally {
4931              if (parsedMessage != null) {
4932                mergeFrom(parsedMessage);
4933              }
4934            }
4935            return this;
4936          }
4937    
4938          // @@protoc_insertion_point(builder_scope:hadoop.hdfs.HeartbeatResponseProto)
4939        }
4940    
4941        static {
4942          defaultInstance = new HeartbeatResponseProto(true);
4943          defaultInstance.initFields();
4944        }
4945    
4946        // @@protoc_insertion_point(class_scope:hadoop.hdfs.HeartbeatResponseProto)
4947      }
4948    
4949      public interface StartLogSegmentRequestProtoOrBuilder
4950          extends com.google.protobuf.MessageOrBuilder {
4951    
4952        // required .hadoop.hdfs.RequestInfoProto reqInfo = 1;
4953        /**
4954         * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
4955         */
4956        boolean hasReqInfo();
4957        /**
4958         * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
4959         */
4960        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo();
4961        /**
4962         * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
4963         */
4964        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder();
4965    
4966        // required uint64 txid = 2;
4967        /**
4968         * <code>required uint64 txid = 2;</code>
4969         *
4970         * <pre>
4971         * Transaction ID
4972         * </pre>
4973         */
4974        boolean hasTxid();
4975        /**
4976         * <code>required uint64 txid = 2;</code>
4977         *
4978         * <pre>
4979         * Transaction ID
4980         * </pre>
4981         */
4982        long getTxid();
4983      }
4984      /**
4985       * Protobuf type {@code hadoop.hdfs.StartLogSegmentRequestProto}
4986       *
4987       * <pre>
4988       **
4989       * startLogSegment()
4990       * </pre>
4991       */
4992      public static final class StartLogSegmentRequestProto extends
4993          com.google.protobuf.GeneratedMessage
4994          implements StartLogSegmentRequestProtoOrBuilder {
4995        // Use StartLogSegmentRequestProto.newBuilder() to construct.
4996        private StartLogSegmentRequestProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
4997          super(builder);
4998          this.unknownFields = builder.getUnknownFields();
4999        }
5000        private StartLogSegmentRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
5001    
5002        private static final StartLogSegmentRequestProto defaultInstance;
5003        public static StartLogSegmentRequestProto getDefaultInstance() {
5004          return defaultInstance;
5005        }
5006    
5007        public StartLogSegmentRequestProto getDefaultInstanceForType() {
5008          return defaultInstance;
5009        }
5010    
5011        private final com.google.protobuf.UnknownFieldSet unknownFields;
5012        @java.lang.Override
5013        public final com.google.protobuf.UnknownFieldSet
5014            getUnknownFields() {
5015          return this.unknownFields;
5016        }
5017        private StartLogSegmentRequestProto(
5018            com.google.protobuf.CodedInputStream input,
5019            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
5020            throws com.google.protobuf.InvalidProtocolBufferException {
5021          initFields();
5022          int mutable_bitField0_ = 0;
5023          com.google.protobuf.UnknownFieldSet.Builder unknownFields =
5024              com.google.protobuf.UnknownFieldSet.newBuilder();
5025          try {
5026            boolean done = false;
5027            while (!done) {
5028              int tag = input.readTag();
5029              switch (tag) {
5030                case 0:
5031                  done = true;
5032                  break;
5033                default: {
5034                  if (!parseUnknownField(input, unknownFields,
5035                                         extensionRegistry, tag)) {
5036                    done = true;
5037                  }
5038                  break;
5039                }
5040                case 10: {
5041                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder subBuilder = null;
5042                  if (((bitField0_ & 0x00000001) == 0x00000001)) {
5043                    subBuilder = reqInfo_.toBuilder();
5044                  }
5045                  reqInfo_ = input.readMessage(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.PARSER, extensionRegistry);
5046                  if (subBuilder != null) {
5047                    subBuilder.mergeFrom(reqInfo_);
5048                    reqInfo_ = subBuilder.buildPartial();
5049                  }
5050                  bitField0_ |= 0x00000001;
5051                  break;
5052                }
5053                case 16: {
5054                  bitField0_ |= 0x00000002;
5055                  txid_ = input.readUInt64();
5056                  break;
5057                }
5058              }
5059            }
5060          } catch (com.google.protobuf.InvalidProtocolBufferException e) {
5061            throw e.setUnfinishedMessage(this);
5062          } catch (java.io.IOException e) {
5063            throw new com.google.protobuf.InvalidProtocolBufferException(
5064                e.getMessage()).setUnfinishedMessage(this);
5065          } finally {
5066            this.unknownFields = unknownFields.build();
5067            makeExtensionsImmutable();
5068          }
5069        }
5070        public static final com.google.protobuf.Descriptors.Descriptor
5071            getDescriptor() {
5072          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_StartLogSegmentRequestProto_descriptor;
5073        }
5074    
5075        protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
5076            internalGetFieldAccessorTable() {
5077          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_StartLogSegmentRequestProto_fieldAccessorTable
5078              .ensureFieldAccessorsInitialized(
5079                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto.Builder.class);
5080        }
5081    
5082        public static com.google.protobuf.Parser<StartLogSegmentRequestProto> PARSER =
5083            new com.google.protobuf.AbstractParser<StartLogSegmentRequestProto>() {
5084          public StartLogSegmentRequestProto parsePartialFrom(
5085              com.google.protobuf.CodedInputStream input,
5086              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
5087              throws com.google.protobuf.InvalidProtocolBufferException {
5088            return new StartLogSegmentRequestProto(input, extensionRegistry);
5089          }
5090        };
5091    
5092        @java.lang.Override
5093        public com.google.protobuf.Parser<StartLogSegmentRequestProto> getParserForType() {
5094          return PARSER;
5095        }
5096    
5097        private int bitField0_;
5098        // required .hadoop.hdfs.RequestInfoProto reqInfo = 1;
5099        public static final int REQINFO_FIELD_NUMBER = 1;
5100        private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto reqInfo_;
5101        /**
5102         * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
5103         */
5104        public boolean hasReqInfo() {
5105          return ((bitField0_ & 0x00000001) == 0x00000001);
5106        }
5107        /**
5108         * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
5109         */
5110        public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo() {
5111          return reqInfo_;
5112        }
5113        /**
5114         * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
5115         */
5116        public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder() {
5117          return reqInfo_;
5118        }
5119    
5120        // required uint64 txid = 2;
5121        public static final int TXID_FIELD_NUMBER = 2;
5122        private long txid_;
5123        /**
5124         * <code>required uint64 txid = 2;</code>
5125         *
5126         * <pre>
5127         * Transaction ID
5128         * </pre>
5129         */
5130        public boolean hasTxid() {
5131          return ((bitField0_ & 0x00000002) == 0x00000002);
5132        }
5133        /**
5134         * <code>required uint64 txid = 2;</code>
5135         *
5136         * <pre>
5137         * Transaction ID
5138         * </pre>
5139         */
5140        public long getTxid() {
5141          return txid_;
5142        }
5143    
5144        private void initFields() {
5145          reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
5146          txid_ = 0L;
5147        }
5148        private byte memoizedIsInitialized = -1;
5149        public final boolean isInitialized() {
5150          byte isInitialized = memoizedIsInitialized;
5151          if (isInitialized != -1) return isInitialized == 1;
5152    
5153          if (!hasReqInfo()) {
5154            memoizedIsInitialized = 0;
5155            return false;
5156          }
5157          if (!hasTxid()) {
5158            memoizedIsInitialized = 0;
5159            return false;
5160          }
5161          if (!getReqInfo().isInitialized()) {
5162            memoizedIsInitialized = 0;
5163            return false;
5164          }
5165          memoizedIsInitialized = 1;
5166          return true;
5167        }
5168    
5169        public void writeTo(com.google.protobuf.CodedOutputStream output)
5170                            throws java.io.IOException {
5171          getSerializedSize();
5172          if (((bitField0_ & 0x00000001) == 0x00000001)) {
5173            output.writeMessage(1, reqInfo_);
5174          }
5175          if (((bitField0_ & 0x00000002) == 0x00000002)) {
5176            output.writeUInt64(2, txid_);
5177          }
5178          getUnknownFields().writeTo(output);
5179        }
5180    
5181        private int memoizedSerializedSize = -1;
5182        public int getSerializedSize() {
5183          int size = memoizedSerializedSize;
5184          if (size != -1) return size;
5185    
5186          size = 0;
5187          if (((bitField0_ & 0x00000001) == 0x00000001)) {
5188            size += com.google.protobuf.CodedOutputStream
5189              .computeMessageSize(1, reqInfo_);
5190          }
5191          if (((bitField0_ & 0x00000002) == 0x00000002)) {
5192            size += com.google.protobuf.CodedOutputStream
5193              .computeUInt64Size(2, txid_);
5194          }
5195          size += getUnknownFields().getSerializedSize();
5196          memoizedSerializedSize = size;
5197          return size;
5198        }
5199    
5200        private static final long serialVersionUID = 0L;
5201        @java.lang.Override
5202        protected java.lang.Object writeReplace()
5203            throws java.io.ObjectStreamException {
5204          return super.writeReplace();
5205        }
5206    
5207        @java.lang.Override
5208        public boolean equals(final java.lang.Object obj) {
5209          if (obj == this) {
5210           return true;
5211          }
5212          if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto)) {
5213            return super.equals(obj);
5214          }
5215          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto) obj;
5216    
5217          boolean result = true;
5218          result = result && (hasReqInfo() == other.hasReqInfo());
5219          if (hasReqInfo()) {
5220            result = result && getReqInfo()
5221                .equals(other.getReqInfo());
5222          }
5223          result = result && (hasTxid() == other.hasTxid());
5224          if (hasTxid()) {
5225            result = result && (getTxid()
5226                == other.getTxid());
5227          }
5228          result = result &&
5229              getUnknownFields().equals(other.getUnknownFields());
5230          return result;
5231        }
5232    
5233        private int memoizedHashCode = 0;
5234        @java.lang.Override
5235        public int hashCode() {
5236          if (memoizedHashCode != 0) {
5237            return memoizedHashCode;
5238          }
5239          int hash = 41;
5240          hash = (19 * hash) + getDescriptorForType().hashCode();
5241          if (hasReqInfo()) {
5242            hash = (37 * hash) + REQINFO_FIELD_NUMBER;
5243            hash = (53 * hash) + getReqInfo().hashCode();
5244          }
5245          if (hasTxid()) {
5246            hash = (37 * hash) + TXID_FIELD_NUMBER;
5247            hash = (53 * hash) + hashLong(getTxid());
5248          }
5249          hash = (29 * hash) + getUnknownFields().hashCode();
5250          memoizedHashCode = hash;
5251          return hash;
5252        }
5253    
5254        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto parseFrom(
5255            com.google.protobuf.ByteString data)
5256            throws com.google.protobuf.InvalidProtocolBufferException {
5257          return PARSER.parseFrom(data);
5258        }
5259        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto parseFrom(
5260            com.google.protobuf.ByteString data,
5261            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
5262            throws com.google.protobuf.InvalidProtocolBufferException {
5263          return PARSER.parseFrom(data, extensionRegistry);
5264        }
5265        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto parseFrom(byte[] data)
5266            throws com.google.protobuf.InvalidProtocolBufferException {
5267          return PARSER.parseFrom(data);
5268        }
5269        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto parseFrom(
5270            byte[] data,
5271            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
5272            throws com.google.protobuf.InvalidProtocolBufferException {
5273          return PARSER.parseFrom(data, extensionRegistry);
5274        }
5275        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto parseFrom(java.io.InputStream input)
5276            throws java.io.IOException {
5277          return PARSER.parseFrom(input);
5278        }
5279        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto parseFrom(
5280            java.io.InputStream input,
5281            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
5282            throws java.io.IOException {
5283          return PARSER.parseFrom(input, extensionRegistry);
5284        }
5285        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto parseDelimitedFrom(java.io.InputStream input)
5286            throws java.io.IOException {
5287          return PARSER.parseDelimitedFrom(input);
5288        }
5289        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto parseDelimitedFrom(
5290            java.io.InputStream input,
5291            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
5292            throws java.io.IOException {
5293          return PARSER.parseDelimitedFrom(input, extensionRegistry);
5294        }
5295        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto parseFrom(
5296            com.google.protobuf.CodedInputStream input)
5297            throws java.io.IOException {
5298          return PARSER.parseFrom(input);
5299        }
5300        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto parseFrom(
5301            com.google.protobuf.CodedInputStream input,
5302            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
5303            throws java.io.IOException {
5304          return PARSER.parseFrom(input, extensionRegistry);
5305        }
5306    
5307        public static Builder newBuilder() { return Builder.create(); }
5308        public Builder newBuilderForType() { return newBuilder(); }
5309        public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto prototype) {
5310          return newBuilder().mergeFrom(prototype);
5311        }
5312        public Builder toBuilder() { return newBuilder(this); }
5313    
5314        @java.lang.Override
5315        protected Builder newBuilderForType(
5316            com.google.protobuf.GeneratedMessage.BuilderParent parent) {
5317          Builder builder = new Builder(parent);
5318          return builder;
5319        }
5320        /**
5321         * Protobuf type {@code hadoop.hdfs.StartLogSegmentRequestProto}
5322         *
5323         * <pre>
5324         **
5325         * startLogSegment()
5326         * </pre>
5327         */
5328        public static final class Builder extends
5329            com.google.protobuf.GeneratedMessage.Builder<Builder>
5330           implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProtoOrBuilder {
5331          public static final com.google.protobuf.Descriptors.Descriptor
5332              getDescriptor() {
5333            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_StartLogSegmentRequestProto_descriptor;
5334          }
5335    
5336          protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
5337              internalGetFieldAccessorTable() {
5338            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_StartLogSegmentRequestProto_fieldAccessorTable
5339                .ensureFieldAccessorsInitialized(
5340                    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto.Builder.class);
5341          }
5342    
5343          // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto.newBuilder()
5344          private Builder() {
5345            maybeForceBuilderInitialization();
5346          }
5347    
5348          private Builder(
5349              com.google.protobuf.GeneratedMessage.BuilderParent parent) {
5350            super(parent);
5351            maybeForceBuilderInitialization();
5352          }
5353          private void maybeForceBuilderInitialization() {
5354            if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
5355              getReqInfoFieldBuilder();
5356            }
5357          }
5358          private static Builder create() {
5359            return new Builder();
5360          }
5361    
5362          public Builder clear() {
5363            super.clear();
5364            if (reqInfoBuilder_ == null) {
5365              reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
5366            } else {
5367              reqInfoBuilder_.clear();
5368            }
5369            bitField0_ = (bitField0_ & ~0x00000001);
5370            txid_ = 0L;
5371            bitField0_ = (bitField0_ & ~0x00000002);
5372            return this;
5373          }
5374    
5375          public Builder clone() {
5376            return create().mergeFrom(buildPartial());
5377          }
5378    
5379          public com.google.protobuf.Descriptors.Descriptor
5380              getDescriptorForType() {
5381            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_StartLogSegmentRequestProto_descriptor;
5382          }
5383    
5384          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto getDefaultInstanceForType() {
5385            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto.getDefaultInstance();
5386          }
5387    
5388          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto build() {
5389            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto result = buildPartial();
5390            if (!result.isInitialized()) {
5391              throw newUninitializedMessageException(result);
5392            }
5393            return result;
5394          }
5395    
5396          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto buildPartial() {
5397            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto(this);
5398            int from_bitField0_ = bitField0_;
5399            int to_bitField0_ = 0;
5400            if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
5401              to_bitField0_ |= 0x00000001;
5402            }
5403            if (reqInfoBuilder_ == null) {
5404              result.reqInfo_ = reqInfo_;
5405            } else {
5406              result.reqInfo_ = reqInfoBuilder_.build();
5407            }
5408            if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
5409              to_bitField0_ |= 0x00000002;
5410            }
5411            result.txid_ = txid_;
5412            result.bitField0_ = to_bitField0_;
5413            onBuilt();
5414            return result;
5415          }
5416    
5417          public Builder mergeFrom(com.google.protobuf.Message other) {
5418            if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto) {
5419              return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto)other);
5420            } else {
5421              super.mergeFrom(other);
5422              return this;
5423            }
5424          }
5425    
5426          public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto other) {
5427            if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto.getDefaultInstance()) return this;
5428            if (other.hasReqInfo()) {
5429              mergeReqInfo(other.getReqInfo());
5430            }
5431            if (other.hasTxid()) {
5432              setTxid(other.getTxid());
5433            }
5434            this.mergeUnknownFields(other.getUnknownFields());
5435            return this;
5436          }
5437    
5438          public final boolean isInitialized() {
5439            if (!hasReqInfo()) {
5440              
5441              return false;
5442            }
5443            if (!hasTxid()) {
5444              
5445              return false;
5446            }
5447            if (!getReqInfo().isInitialized()) {
5448              
5449              return false;
5450            }
5451            return true;
5452          }
5453    
5454          public Builder mergeFrom(
5455              com.google.protobuf.CodedInputStream input,
5456              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
5457              throws java.io.IOException {
5458            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto parsedMessage = null;
5459            try {
5460              parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
5461            } catch (com.google.protobuf.InvalidProtocolBufferException e) {
5462              parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto) e.getUnfinishedMessage();
5463              throw e;
5464            } finally {
5465              if (parsedMessage != null) {
5466                mergeFrom(parsedMessage);
5467              }
5468            }
5469            return this;
5470          }
5471          private int bitField0_;
5472    
5473          // required .hadoop.hdfs.RequestInfoProto reqInfo = 1;
5474          private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
5475          private com.google.protobuf.SingleFieldBuilder<
5476              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder> reqInfoBuilder_;
5477          /**
5478           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
5479           */
5480          public boolean hasReqInfo() {
5481            return ((bitField0_ & 0x00000001) == 0x00000001);
5482          }
5483          /**
5484           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
5485           */
5486          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo() {
5487            if (reqInfoBuilder_ == null) {
5488              return reqInfo_;
5489            } else {
5490              return reqInfoBuilder_.getMessage();
5491            }
5492          }
5493          /**
5494           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
5495           */
5496          public Builder setReqInfo(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto value) {
5497            if (reqInfoBuilder_ == null) {
5498              if (value == null) {
5499                throw new NullPointerException();
5500              }
5501              reqInfo_ = value;
5502              onChanged();
5503            } else {
5504              reqInfoBuilder_.setMessage(value);
5505            }
5506            bitField0_ |= 0x00000001;
5507            return this;
5508          }
5509          /**
5510           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
5511           */
5512          public Builder setReqInfo(
5513              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder builderForValue) {
5514            if (reqInfoBuilder_ == null) {
5515              reqInfo_ = builderForValue.build();
5516              onChanged();
5517            } else {
5518              reqInfoBuilder_.setMessage(builderForValue.build());
5519            }
5520            bitField0_ |= 0x00000001;
5521            return this;
5522          }
5523          /**
5524           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
5525           */
5526          public Builder mergeReqInfo(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto value) {
5527            if (reqInfoBuilder_ == null) {
5528              if (((bitField0_ & 0x00000001) == 0x00000001) &&
5529                  reqInfo_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance()) {
5530                reqInfo_ =
5531                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.newBuilder(reqInfo_).mergeFrom(value).buildPartial();
5532              } else {
5533                reqInfo_ = value;
5534              }
5535              onChanged();
5536            } else {
5537              reqInfoBuilder_.mergeFrom(value);
5538            }
5539            bitField0_ |= 0x00000001;
5540            return this;
5541          }
5542          /**
5543           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
5544           */
5545          public Builder clearReqInfo() {
5546            if (reqInfoBuilder_ == null) {
5547              reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
5548              onChanged();
5549            } else {
5550              reqInfoBuilder_.clear();
5551            }
5552            bitField0_ = (bitField0_ & ~0x00000001);
5553            return this;
5554          }
5555          /**
5556           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
5557           */
5558          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder getReqInfoBuilder() {
5559            bitField0_ |= 0x00000001;
5560            onChanged();
5561            return getReqInfoFieldBuilder().getBuilder();
5562          }
5563          /**
5564           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
5565           */
5566          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder() {
5567            if (reqInfoBuilder_ != null) {
5568              return reqInfoBuilder_.getMessageOrBuilder();
5569            } else {
5570              return reqInfo_;
5571            }
5572          }
5573          /**
5574           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
5575           */
5576          private com.google.protobuf.SingleFieldBuilder<
5577              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder> 
5578              getReqInfoFieldBuilder() {
5579            if (reqInfoBuilder_ == null) {
5580              reqInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder<
5581                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder>(
5582                      reqInfo_,
5583                      getParentForChildren(),
5584                      isClean());
5585              reqInfo_ = null;
5586            }
5587            return reqInfoBuilder_;
5588          }
5589    
5590          // required uint64 txid = 2;
5591          private long txid_ ;
5592          /**
5593           * <code>required uint64 txid = 2;</code>
5594           *
5595           * <pre>
5596           * Transaction ID
5597           * </pre>
5598           */
5599          public boolean hasTxid() {
5600            return ((bitField0_ & 0x00000002) == 0x00000002);
5601          }
5602          /**
5603           * <code>required uint64 txid = 2;</code>
5604           *
5605           * <pre>
5606           * Transaction ID
5607           * </pre>
5608           */
5609          public long getTxid() {
5610            return txid_;
5611          }
5612          /**
5613           * <code>required uint64 txid = 2;</code>
5614           *
5615           * <pre>
5616           * Transaction ID
5617           * </pre>
5618           */
5619          public Builder setTxid(long value) {
5620            bitField0_ |= 0x00000002;
5621            txid_ = value;
5622            onChanged();
5623            return this;
5624          }
5625          /**
5626           * <code>required uint64 txid = 2;</code>
5627           *
5628           * <pre>
5629           * Transaction ID
5630           * </pre>
5631           */
5632          public Builder clearTxid() {
5633            bitField0_ = (bitField0_ & ~0x00000002);
5634            txid_ = 0L;
5635            onChanged();
5636            return this;
5637          }
5638    
5639          // @@protoc_insertion_point(builder_scope:hadoop.hdfs.StartLogSegmentRequestProto)
5640        }
5641    
5642        static {
5643          defaultInstance = new StartLogSegmentRequestProto(true);
5644          defaultInstance.initFields();
5645        }
5646    
5647        // @@protoc_insertion_point(class_scope:hadoop.hdfs.StartLogSegmentRequestProto)
5648      }
5649    
5650      public interface StartLogSegmentResponseProtoOrBuilder
5651          extends com.google.protobuf.MessageOrBuilder {
5652      }
5653      /**
5654       * Protobuf type {@code hadoop.hdfs.StartLogSegmentResponseProto}
5655       */
5656      public static final class StartLogSegmentResponseProto extends
5657          com.google.protobuf.GeneratedMessage
5658          implements StartLogSegmentResponseProtoOrBuilder {
5659        // Use StartLogSegmentResponseProto.newBuilder() to construct.
5660        private StartLogSegmentResponseProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
5661          super(builder);
5662          this.unknownFields = builder.getUnknownFields();
5663        }
5664        private StartLogSegmentResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
5665    
5666        private static final StartLogSegmentResponseProto defaultInstance;
5667        public static StartLogSegmentResponseProto getDefaultInstance() {
5668          return defaultInstance;
5669        }
5670    
5671        public StartLogSegmentResponseProto getDefaultInstanceForType() {
5672          return defaultInstance;
5673        }
5674    
5675        private final com.google.protobuf.UnknownFieldSet unknownFields;
5676        @java.lang.Override
5677        public final com.google.protobuf.UnknownFieldSet
5678            getUnknownFields() {
5679          return this.unknownFields;
5680        }
5681        private StartLogSegmentResponseProto(
5682            com.google.protobuf.CodedInputStream input,
5683            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
5684            throws com.google.protobuf.InvalidProtocolBufferException {
5685          initFields();
5686          com.google.protobuf.UnknownFieldSet.Builder unknownFields =
5687              com.google.protobuf.UnknownFieldSet.newBuilder();
5688          try {
5689            boolean done = false;
5690            while (!done) {
5691              int tag = input.readTag();
5692              switch (tag) {
5693                case 0:
5694                  done = true;
5695                  break;
5696                default: {
5697                  if (!parseUnknownField(input, unknownFields,
5698                                         extensionRegistry, tag)) {
5699                    done = true;
5700                  }
5701                  break;
5702                }
5703              }
5704            }
5705          } catch (com.google.protobuf.InvalidProtocolBufferException e) {
5706            throw e.setUnfinishedMessage(this);
5707          } catch (java.io.IOException e) {
5708            throw new com.google.protobuf.InvalidProtocolBufferException(
5709                e.getMessage()).setUnfinishedMessage(this);
5710          } finally {
5711            this.unknownFields = unknownFields.build();
5712            makeExtensionsImmutable();
5713          }
5714        }
5715        public static final com.google.protobuf.Descriptors.Descriptor
5716            getDescriptor() {
5717          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_StartLogSegmentResponseProto_descriptor;
5718        }
5719    
5720        protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
5721            internalGetFieldAccessorTable() {
5722          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_StartLogSegmentResponseProto_fieldAccessorTable
5723              .ensureFieldAccessorsInitialized(
5724                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto.Builder.class);
5725        }
5726    
5727        public static com.google.protobuf.Parser<StartLogSegmentResponseProto> PARSER =
5728            new com.google.protobuf.AbstractParser<StartLogSegmentResponseProto>() {
5729          public StartLogSegmentResponseProto parsePartialFrom(
5730              com.google.protobuf.CodedInputStream input,
5731              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
5732              throws com.google.protobuf.InvalidProtocolBufferException {
5733            return new StartLogSegmentResponseProto(input, extensionRegistry);
5734          }
5735        };
5736    
5737        @java.lang.Override
5738        public com.google.protobuf.Parser<StartLogSegmentResponseProto> getParserForType() {
5739          return PARSER;
5740        }
5741    
5742        private void initFields() {
5743        }
5744        private byte memoizedIsInitialized = -1;
5745        public final boolean isInitialized() {
5746          byte isInitialized = memoizedIsInitialized;
5747          if (isInitialized != -1) return isInitialized == 1;
5748    
5749          memoizedIsInitialized = 1;
5750          return true;
5751        }
5752    
5753        public void writeTo(com.google.protobuf.CodedOutputStream output)
5754                            throws java.io.IOException {
5755          getSerializedSize();
5756          getUnknownFields().writeTo(output);
5757        }
5758    
5759        private int memoizedSerializedSize = -1;
5760        public int getSerializedSize() {
5761          int size = memoizedSerializedSize;
5762          if (size != -1) return size;
5763    
5764          size = 0;
5765          size += getUnknownFields().getSerializedSize();
5766          memoizedSerializedSize = size;
5767          return size;
5768        }
5769    
5770        private static final long serialVersionUID = 0L;
5771        @java.lang.Override
5772        protected java.lang.Object writeReplace()
5773            throws java.io.ObjectStreamException {
5774          return super.writeReplace();
5775        }
5776    
5777        @java.lang.Override
5778        public boolean equals(final java.lang.Object obj) {
5779          if (obj == this) {
5780           return true;
5781          }
5782          if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto)) {
5783            return super.equals(obj);
5784          }
5785          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto) obj;
5786    
5787          boolean result = true;
5788          result = result &&
5789              getUnknownFields().equals(other.getUnknownFields());
5790          return result;
5791        }
5792    
5793        private int memoizedHashCode = 0;
5794        @java.lang.Override
5795        public int hashCode() {
5796          if (memoizedHashCode != 0) {
5797            return memoizedHashCode;
5798          }
5799          int hash = 41;
5800          hash = (19 * hash) + getDescriptorForType().hashCode();
5801          hash = (29 * hash) + getUnknownFields().hashCode();
5802          memoizedHashCode = hash;
5803          return hash;
5804        }
5805    
5806        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto parseFrom(
5807            com.google.protobuf.ByteString data)
5808            throws com.google.protobuf.InvalidProtocolBufferException {
5809          return PARSER.parseFrom(data);
5810        }
5811        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto parseFrom(
5812            com.google.protobuf.ByteString data,
5813            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
5814            throws com.google.protobuf.InvalidProtocolBufferException {
5815          return PARSER.parseFrom(data, extensionRegistry);
5816        }
5817        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto parseFrom(byte[] data)
5818            throws com.google.protobuf.InvalidProtocolBufferException {
5819          return PARSER.parseFrom(data);
5820        }
5821        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto parseFrom(
5822            byte[] data,
5823            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
5824            throws com.google.protobuf.InvalidProtocolBufferException {
5825          return PARSER.parseFrom(data, extensionRegistry);
5826        }
5827        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto parseFrom(java.io.InputStream input)
5828            throws java.io.IOException {
5829          return PARSER.parseFrom(input);
5830        }
5831        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto parseFrom(
5832            java.io.InputStream input,
5833            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
5834            throws java.io.IOException {
5835          return PARSER.parseFrom(input, extensionRegistry);
5836        }
5837        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto parseDelimitedFrom(java.io.InputStream input)
5838            throws java.io.IOException {
5839          return PARSER.parseDelimitedFrom(input);
5840        }
5841        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto parseDelimitedFrom(
5842            java.io.InputStream input,
5843            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
5844            throws java.io.IOException {
5845          return PARSER.parseDelimitedFrom(input, extensionRegistry);
5846        }
5847        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto parseFrom(
5848            com.google.protobuf.CodedInputStream input)
5849            throws java.io.IOException {
5850          return PARSER.parseFrom(input);
5851        }
5852        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto parseFrom(
5853            com.google.protobuf.CodedInputStream input,
5854            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
5855            throws java.io.IOException {
5856          return PARSER.parseFrom(input, extensionRegistry);
5857        }
5858    
5859        public static Builder newBuilder() { return Builder.create(); }
5860        public Builder newBuilderForType() { return newBuilder(); }
5861        public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto prototype) {
5862          return newBuilder().mergeFrom(prototype);
5863        }
5864        public Builder toBuilder() { return newBuilder(this); }
5865    
5866        @java.lang.Override
5867        protected Builder newBuilderForType(
5868            com.google.protobuf.GeneratedMessage.BuilderParent parent) {
5869          Builder builder = new Builder(parent);
5870          return builder;
5871        }
5872        /**
5873         * Protobuf type {@code hadoop.hdfs.StartLogSegmentResponseProto}
5874         */
5875        public static final class Builder extends
5876            com.google.protobuf.GeneratedMessage.Builder<Builder>
5877           implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProtoOrBuilder {
5878          public static final com.google.protobuf.Descriptors.Descriptor
5879              getDescriptor() {
5880            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_StartLogSegmentResponseProto_descriptor;
5881          }
5882    
5883          protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
5884              internalGetFieldAccessorTable() {
5885            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_StartLogSegmentResponseProto_fieldAccessorTable
5886                .ensureFieldAccessorsInitialized(
5887                    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto.Builder.class);
5888          }
5889    
5890          // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto.newBuilder()
5891          private Builder() {
5892            maybeForceBuilderInitialization();
5893          }
5894    
5895          private Builder(
5896              com.google.protobuf.GeneratedMessage.BuilderParent parent) {
5897            super(parent);
5898            maybeForceBuilderInitialization();
5899          }
5900          private void maybeForceBuilderInitialization() {
5901            if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
5902            }
5903          }
5904          private static Builder create() {
5905            return new Builder();
5906          }
5907    
5908          public Builder clear() {
5909            super.clear();
5910            return this;
5911          }
5912    
5913          public Builder clone() {
5914            return create().mergeFrom(buildPartial());
5915          }
5916    
5917          public com.google.protobuf.Descriptors.Descriptor
5918              getDescriptorForType() {
5919            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_StartLogSegmentResponseProto_descriptor;
5920          }
5921    
5922          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto getDefaultInstanceForType() {
5923            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto.getDefaultInstance();
5924          }
5925    
5926          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto build() {
5927            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto result = buildPartial();
5928            if (!result.isInitialized()) {
5929              throw newUninitializedMessageException(result);
5930            }
5931            return result;
5932          }
5933    
5934          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto buildPartial() {
5935            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto(this);
5936            onBuilt();
5937            return result;
5938          }
5939    
5940          public Builder mergeFrom(com.google.protobuf.Message other) {
5941            if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto) {
5942              return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto)other);
5943            } else {
5944              super.mergeFrom(other);
5945              return this;
5946            }
5947          }
5948    
5949          public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto other) {
5950            if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto.getDefaultInstance()) return this;
5951            this.mergeUnknownFields(other.getUnknownFields());
5952            return this;
5953          }
5954    
5955          public final boolean isInitialized() {
5956            return true;
5957          }
5958    
5959          public Builder mergeFrom(
5960              com.google.protobuf.CodedInputStream input,
5961              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
5962              throws java.io.IOException {
5963            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto parsedMessage = null;
5964            try {
5965              parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
5966            } catch (com.google.protobuf.InvalidProtocolBufferException e) {
5967              parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto) e.getUnfinishedMessage();
5968              throw e;
5969            } finally {
5970              if (parsedMessage != null) {
5971                mergeFrom(parsedMessage);
5972              }
5973            }
5974            return this;
5975          }
5976    
5977          // @@protoc_insertion_point(builder_scope:hadoop.hdfs.StartLogSegmentResponseProto)
5978        }
5979    
5980        static {
5981          defaultInstance = new StartLogSegmentResponseProto(true);
5982          defaultInstance.initFields();
5983        }
5984    
5985        // @@protoc_insertion_point(class_scope:hadoop.hdfs.StartLogSegmentResponseProto)
5986      }
5987    
5988      public interface FinalizeLogSegmentRequestProtoOrBuilder
5989          extends com.google.protobuf.MessageOrBuilder {
5990    
5991        // required .hadoop.hdfs.RequestInfoProto reqInfo = 1;
5992        /**
5993         * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
5994         */
5995        boolean hasReqInfo();
5996        /**
5997         * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
5998         */
5999        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo();
6000        /**
6001         * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
6002         */
6003        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder();
6004    
6005        // required uint64 startTxId = 2;
6006        /**
6007         * <code>required uint64 startTxId = 2;</code>
6008         */
6009        boolean hasStartTxId();
6010        /**
6011         * <code>required uint64 startTxId = 2;</code>
6012         */
6013        long getStartTxId();
6014    
6015        // required uint64 endTxId = 3;
6016        /**
6017         * <code>required uint64 endTxId = 3;</code>
6018         */
6019        boolean hasEndTxId();
6020        /**
6021         * <code>required uint64 endTxId = 3;</code>
6022         */
6023        long getEndTxId();
6024      }
6025      /**
6026       * Protobuf type {@code hadoop.hdfs.FinalizeLogSegmentRequestProto}
6027       *
6028       * <pre>
6029       **
6030       * finalizeLogSegment()
6031       * </pre>
6032       */
6033      public static final class FinalizeLogSegmentRequestProto extends
6034          com.google.protobuf.GeneratedMessage
6035          implements FinalizeLogSegmentRequestProtoOrBuilder {
6036        // Use FinalizeLogSegmentRequestProto.newBuilder() to construct.
6037        private FinalizeLogSegmentRequestProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
6038          super(builder);
6039          this.unknownFields = builder.getUnknownFields();
6040        }
6041        private FinalizeLogSegmentRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
6042    
6043        private static final FinalizeLogSegmentRequestProto defaultInstance;
6044        public static FinalizeLogSegmentRequestProto getDefaultInstance() {
6045          return defaultInstance;
6046        }
6047    
6048        public FinalizeLogSegmentRequestProto getDefaultInstanceForType() {
6049          return defaultInstance;
6050        }
6051    
6052        private final com.google.protobuf.UnknownFieldSet unknownFields;
6053        @java.lang.Override
6054        public final com.google.protobuf.UnknownFieldSet
6055            getUnknownFields() {
6056          return this.unknownFields;
6057        }
6058        private FinalizeLogSegmentRequestProto(
6059            com.google.protobuf.CodedInputStream input,
6060            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
6061            throws com.google.protobuf.InvalidProtocolBufferException {
6062          initFields();
6063          int mutable_bitField0_ = 0;
6064          com.google.protobuf.UnknownFieldSet.Builder unknownFields =
6065              com.google.protobuf.UnknownFieldSet.newBuilder();
6066          try {
6067            boolean done = false;
6068            while (!done) {
6069              int tag = input.readTag();
6070              switch (tag) {
6071                case 0:
6072                  done = true;
6073                  break;
6074                default: {
6075                  if (!parseUnknownField(input, unknownFields,
6076                                         extensionRegistry, tag)) {
6077                    done = true;
6078                  }
6079                  break;
6080                }
6081                case 10: {
6082                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder subBuilder = null;
6083                  if (((bitField0_ & 0x00000001) == 0x00000001)) {
6084                    subBuilder = reqInfo_.toBuilder();
6085                  }
6086                  reqInfo_ = input.readMessage(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.PARSER, extensionRegistry);
6087                  if (subBuilder != null) {
6088                    subBuilder.mergeFrom(reqInfo_);
6089                    reqInfo_ = subBuilder.buildPartial();
6090                  }
6091                  bitField0_ |= 0x00000001;
6092                  break;
6093                }
6094                case 16: {
6095                  bitField0_ |= 0x00000002;
6096                  startTxId_ = input.readUInt64();
6097                  break;
6098                }
6099                case 24: {
6100                  bitField0_ |= 0x00000004;
6101                  endTxId_ = input.readUInt64();
6102                  break;
6103                }
6104              }
6105            }
6106          } catch (com.google.protobuf.InvalidProtocolBufferException e) {
6107            throw e.setUnfinishedMessage(this);
6108          } catch (java.io.IOException e) {
6109            throw new com.google.protobuf.InvalidProtocolBufferException(
6110                e.getMessage()).setUnfinishedMessage(this);
6111          } finally {
6112            this.unknownFields = unknownFields.build();
6113            makeExtensionsImmutable();
6114          }
6115        }
6116        public static final com.google.protobuf.Descriptors.Descriptor
6117            getDescriptor() {
6118          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_FinalizeLogSegmentRequestProto_descriptor;
6119        }
6120    
6121        protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
6122            internalGetFieldAccessorTable() {
6123          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_FinalizeLogSegmentRequestProto_fieldAccessorTable
6124              .ensureFieldAccessorsInitialized(
6125                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto.Builder.class);
6126        }
6127    
6128        public static com.google.protobuf.Parser<FinalizeLogSegmentRequestProto> PARSER =
6129            new com.google.protobuf.AbstractParser<FinalizeLogSegmentRequestProto>() {
6130          public FinalizeLogSegmentRequestProto parsePartialFrom(
6131              com.google.protobuf.CodedInputStream input,
6132              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
6133              throws com.google.protobuf.InvalidProtocolBufferException {
6134            return new FinalizeLogSegmentRequestProto(input, extensionRegistry);
6135          }
6136        };
6137    
6138        @java.lang.Override
6139        public com.google.protobuf.Parser<FinalizeLogSegmentRequestProto> getParserForType() {
6140          return PARSER;
6141        }
6142    
6143        private int bitField0_;
6144        // required .hadoop.hdfs.RequestInfoProto reqInfo = 1;
6145        public static final int REQINFO_FIELD_NUMBER = 1;
6146        private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto reqInfo_;
6147        /**
6148         * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
6149         */
6150        public boolean hasReqInfo() {
6151          return ((bitField0_ & 0x00000001) == 0x00000001);
6152        }
6153        /**
6154         * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
6155         */
6156        public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo() {
6157          return reqInfo_;
6158        }
6159        /**
6160         * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
6161         */
6162        public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder() {
6163          return reqInfo_;
6164        }
6165    
6166        // required uint64 startTxId = 2;
6167        public static final int STARTTXID_FIELD_NUMBER = 2;
6168        private long startTxId_;
6169        /**
6170         * <code>required uint64 startTxId = 2;</code>
6171         */
6172        public boolean hasStartTxId() {
6173          return ((bitField0_ & 0x00000002) == 0x00000002);
6174        }
6175        /**
6176         * <code>required uint64 startTxId = 2;</code>
6177         */
6178        public long getStartTxId() {
6179          return startTxId_;
6180        }
6181    
6182        // required uint64 endTxId = 3;
6183        public static final int ENDTXID_FIELD_NUMBER = 3;
6184        private long endTxId_;
6185        /**
6186         * <code>required uint64 endTxId = 3;</code>
6187         */
6188        public boolean hasEndTxId() {
6189          return ((bitField0_ & 0x00000004) == 0x00000004);
6190        }
6191        /**
6192         * <code>required uint64 endTxId = 3;</code>
6193         */
6194        public long getEndTxId() {
6195          return endTxId_;
6196        }
6197    
6198        private void initFields() {
6199          reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
6200          startTxId_ = 0L;
6201          endTxId_ = 0L;
6202        }
6203        private byte memoizedIsInitialized = -1;
6204        public final boolean isInitialized() {
6205          byte isInitialized = memoizedIsInitialized;
6206          if (isInitialized != -1) return isInitialized == 1;
6207    
6208          if (!hasReqInfo()) {
6209            memoizedIsInitialized = 0;
6210            return false;
6211          }
6212          if (!hasStartTxId()) {
6213            memoizedIsInitialized = 0;
6214            return false;
6215          }
6216          if (!hasEndTxId()) {
6217            memoizedIsInitialized = 0;
6218            return false;
6219          }
6220          if (!getReqInfo().isInitialized()) {
6221            memoizedIsInitialized = 0;
6222            return false;
6223          }
6224          memoizedIsInitialized = 1;
6225          return true;
6226        }
6227    
6228        public void writeTo(com.google.protobuf.CodedOutputStream output)
6229                            throws java.io.IOException {
6230          getSerializedSize();
6231          if (((bitField0_ & 0x00000001) == 0x00000001)) {
6232            output.writeMessage(1, reqInfo_);
6233          }
6234          if (((bitField0_ & 0x00000002) == 0x00000002)) {
6235            output.writeUInt64(2, startTxId_);
6236          }
6237          if (((bitField0_ & 0x00000004) == 0x00000004)) {
6238            output.writeUInt64(3, endTxId_);
6239          }
6240          getUnknownFields().writeTo(output);
6241        }
6242    
6243        private int memoizedSerializedSize = -1;
6244        public int getSerializedSize() {
6245          int size = memoizedSerializedSize;
6246          if (size != -1) return size;
6247    
6248          size = 0;
6249          if (((bitField0_ & 0x00000001) == 0x00000001)) {
6250            size += com.google.protobuf.CodedOutputStream
6251              .computeMessageSize(1, reqInfo_);
6252          }
6253          if (((bitField0_ & 0x00000002) == 0x00000002)) {
6254            size += com.google.protobuf.CodedOutputStream
6255              .computeUInt64Size(2, startTxId_);
6256          }
6257          if (((bitField0_ & 0x00000004) == 0x00000004)) {
6258            size += com.google.protobuf.CodedOutputStream
6259              .computeUInt64Size(3, endTxId_);
6260          }
6261          size += getUnknownFields().getSerializedSize();
6262          memoizedSerializedSize = size;
6263          return size;
6264        }
6265    
6266        private static final long serialVersionUID = 0L;
6267        @java.lang.Override
6268        protected java.lang.Object writeReplace()
6269            throws java.io.ObjectStreamException {
6270          return super.writeReplace();
6271        }
6272    
6273        @java.lang.Override
6274        public boolean equals(final java.lang.Object obj) {
6275          if (obj == this) {
6276           return true;
6277          }
6278          if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto)) {
6279            return super.equals(obj);
6280          }
6281          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto) obj;
6282    
6283          boolean result = true;
6284          result = result && (hasReqInfo() == other.hasReqInfo());
6285          if (hasReqInfo()) {
6286            result = result && getReqInfo()
6287                .equals(other.getReqInfo());
6288          }
6289          result = result && (hasStartTxId() == other.hasStartTxId());
6290          if (hasStartTxId()) {
6291            result = result && (getStartTxId()
6292                == other.getStartTxId());
6293          }
6294          result = result && (hasEndTxId() == other.hasEndTxId());
6295          if (hasEndTxId()) {
6296            result = result && (getEndTxId()
6297                == other.getEndTxId());
6298          }
6299          result = result &&
6300              getUnknownFields().equals(other.getUnknownFields());
6301          return result;
6302        }
6303    
6304        private int memoizedHashCode = 0;
6305        @java.lang.Override
6306        public int hashCode() {
6307          if (memoizedHashCode != 0) {
6308            return memoizedHashCode;
6309          }
6310          int hash = 41;
6311          hash = (19 * hash) + getDescriptorForType().hashCode();
6312          if (hasReqInfo()) {
6313            hash = (37 * hash) + REQINFO_FIELD_NUMBER;
6314            hash = (53 * hash) + getReqInfo().hashCode();
6315          }
6316          if (hasStartTxId()) {
6317            hash = (37 * hash) + STARTTXID_FIELD_NUMBER;
6318            hash = (53 * hash) + hashLong(getStartTxId());
6319          }
6320          if (hasEndTxId()) {
6321            hash = (37 * hash) + ENDTXID_FIELD_NUMBER;
6322            hash = (53 * hash) + hashLong(getEndTxId());
6323          }
6324          hash = (29 * hash) + getUnknownFields().hashCode();
6325          memoizedHashCode = hash;
6326          return hash;
6327        }
6328    
6329        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto parseFrom(
6330            com.google.protobuf.ByteString data)
6331            throws com.google.protobuf.InvalidProtocolBufferException {
6332          return PARSER.parseFrom(data);
6333        }
6334        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto parseFrom(
6335            com.google.protobuf.ByteString data,
6336            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
6337            throws com.google.protobuf.InvalidProtocolBufferException {
6338          return PARSER.parseFrom(data, extensionRegistry);
6339        }
6340        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto parseFrom(byte[] data)
6341            throws com.google.protobuf.InvalidProtocolBufferException {
6342          return PARSER.parseFrom(data);
6343        }
6344        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto parseFrom(
6345            byte[] data,
6346            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
6347            throws com.google.protobuf.InvalidProtocolBufferException {
6348          return PARSER.parseFrom(data, extensionRegistry);
6349        }
6350        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto parseFrom(java.io.InputStream input)
6351            throws java.io.IOException {
6352          return PARSER.parseFrom(input);
6353        }
6354        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto parseFrom(
6355            java.io.InputStream input,
6356            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
6357            throws java.io.IOException {
6358          return PARSER.parseFrom(input, extensionRegistry);
6359        }
6360        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto parseDelimitedFrom(java.io.InputStream input)
6361            throws java.io.IOException {
6362          return PARSER.parseDelimitedFrom(input);
6363        }
6364        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto parseDelimitedFrom(
6365            java.io.InputStream input,
6366            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
6367            throws java.io.IOException {
6368          return PARSER.parseDelimitedFrom(input, extensionRegistry);
6369        }
6370        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto parseFrom(
6371            com.google.protobuf.CodedInputStream input)
6372            throws java.io.IOException {
6373          return PARSER.parseFrom(input);
6374        }
6375        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto parseFrom(
6376            com.google.protobuf.CodedInputStream input,
6377            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
6378            throws java.io.IOException {
6379          return PARSER.parseFrom(input, extensionRegistry);
6380        }
6381    
6382        public static Builder newBuilder() { return Builder.create(); }
6383        public Builder newBuilderForType() { return newBuilder(); }
6384        public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto prototype) {
6385          return newBuilder().mergeFrom(prototype);
6386        }
6387        public Builder toBuilder() { return newBuilder(this); }
6388    
6389        @java.lang.Override
6390        protected Builder newBuilderForType(
6391            com.google.protobuf.GeneratedMessage.BuilderParent parent) {
6392          Builder builder = new Builder(parent);
6393          return builder;
6394        }
6395        /**
6396         * Protobuf type {@code hadoop.hdfs.FinalizeLogSegmentRequestProto}
6397         *
6398         * <pre>
6399         **
6400         * finalizeLogSegment()
6401         * </pre>
6402         */
6403        public static final class Builder extends
6404            com.google.protobuf.GeneratedMessage.Builder<Builder>
6405           implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProtoOrBuilder {
6406          public static final com.google.protobuf.Descriptors.Descriptor
6407              getDescriptor() {
6408            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_FinalizeLogSegmentRequestProto_descriptor;
6409          }
6410    
6411          protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
6412              internalGetFieldAccessorTable() {
6413            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_FinalizeLogSegmentRequestProto_fieldAccessorTable
6414                .ensureFieldAccessorsInitialized(
6415                    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto.Builder.class);
6416          }
6417    
6418          // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto.newBuilder()
6419          private Builder() {
6420            maybeForceBuilderInitialization();
6421          }
6422    
6423          private Builder(
6424              com.google.protobuf.GeneratedMessage.BuilderParent parent) {
6425            super(parent);
6426            maybeForceBuilderInitialization();
6427          }
6428          private void maybeForceBuilderInitialization() {
6429            if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
6430              getReqInfoFieldBuilder();
6431            }
6432          }
6433          private static Builder create() {
6434            return new Builder();
6435          }
6436    
6437          public Builder clear() {
6438            super.clear();
6439            if (reqInfoBuilder_ == null) {
6440              reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
6441            } else {
6442              reqInfoBuilder_.clear();
6443            }
6444            bitField0_ = (bitField0_ & ~0x00000001);
6445            startTxId_ = 0L;
6446            bitField0_ = (bitField0_ & ~0x00000002);
6447            endTxId_ = 0L;
6448            bitField0_ = (bitField0_ & ~0x00000004);
6449            return this;
6450          }
6451    
6452          public Builder clone() {
6453            return create().mergeFrom(buildPartial());
6454          }
6455    
6456          public com.google.protobuf.Descriptors.Descriptor
6457              getDescriptorForType() {
6458            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_FinalizeLogSegmentRequestProto_descriptor;
6459          }
6460    
6461          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto getDefaultInstanceForType() {
6462            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto.getDefaultInstance();
6463          }
6464    
6465          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto build() {
6466            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto result = buildPartial();
6467            if (!result.isInitialized()) {
6468              throw newUninitializedMessageException(result);
6469            }
6470            return result;
6471          }
6472    
6473          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto buildPartial() {
6474            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto(this);
6475            int from_bitField0_ = bitField0_;
6476            int to_bitField0_ = 0;
6477            if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
6478              to_bitField0_ |= 0x00000001;
6479            }
6480            if (reqInfoBuilder_ == null) {
6481              result.reqInfo_ = reqInfo_;
6482            } else {
6483              result.reqInfo_ = reqInfoBuilder_.build();
6484            }
6485            if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
6486              to_bitField0_ |= 0x00000002;
6487            }
6488            result.startTxId_ = startTxId_;
6489            if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
6490              to_bitField0_ |= 0x00000004;
6491            }
6492            result.endTxId_ = endTxId_;
6493            result.bitField0_ = to_bitField0_;
6494            onBuilt();
6495            return result;
6496          }
6497    
6498          public Builder mergeFrom(com.google.protobuf.Message other) {
6499            if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto) {
6500              return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto)other);
6501            } else {
6502              super.mergeFrom(other);
6503              return this;
6504            }
6505          }
6506    
6507          public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto other) {
6508            if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto.getDefaultInstance()) return this;
6509            if (other.hasReqInfo()) {
6510              mergeReqInfo(other.getReqInfo());
6511            }
6512            if (other.hasStartTxId()) {
6513              setStartTxId(other.getStartTxId());
6514            }
6515            if (other.hasEndTxId()) {
6516              setEndTxId(other.getEndTxId());
6517            }
6518            this.mergeUnknownFields(other.getUnknownFields());
6519            return this;
6520          }
6521    
6522          public final boolean isInitialized() {
6523            if (!hasReqInfo()) {
6524              
6525              return false;
6526            }
6527            if (!hasStartTxId()) {
6528              
6529              return false;
6530            }
6531            if (!hasEndTxId()) {
6532              
6533              return false;
6534            }
6535            if (!getReqInfo().isInitialized()) {
6536              
6537              return false;
6538            }
6539            return true;
6540          }
6541    
6542          public Builder mergeFrom(
6543              com.google.protobuf.CodedInputStream input,
6544              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
6545              throws java.io.IOException {
6546            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto parsedMessage = null;
6547            try {
6548              parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
6549            } catch (com.google.protobuf.InvalidProtocolBufferException e) {
6550              parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto) e.getUnfinishedMessage();
6551              throw e;
6552            } finally {
6553              if (parsedMessage != null) {
6554                mergeFrom(parsedMessage);
6555              }
6556            }
6557            return this;
6558          }
6559          private int bitField0_;
6560    
6561          // required .hadoop.hdfs.RequestInfoProto reqInfo = 1;
6562          private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
6563          private com.google.protobuf.SingleFieldBuilder<
6564              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder> reqInfoBuilder_;
6565          /**
6566           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
6567           */
6568          public boolean hasReqInfo() {
6569            return ((bitField0_ & 0x00000001) == 0x00000001);
6570          }
6571          /**
6572           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
6573           */
6574          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo() {
6575            if (reqInfoBuilder_ == null) {
6576              return reqInfo_;
6577            } else {
6578              return reqInfoBuilder_.getMessage();
6579            }
6580          }
6581          /**
6582           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
6583           */
6584          public Builder setReqInfo(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto value) {
6585            if (reqInfoBuilder_ == null) {
6586              if (value == null) {
6587                throw new NullPointerException();
6588              }
6589              reqInfo_ = value;
6590              onChanged();
6591            } else {
6592              reqInfoBuilder_.setMessage(value);
6593            }
6594            bitField0_ |= 0x00000001;
6595            return this;
6596          }
6597          /**
6598           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
6599           */
6600          public Builder setReqInfo(
6601              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder builderForValue) {
6602            if (reqInfoBuilder_ == null) {
6603              reqInfo_ = builderForValue.build();
6604              onChanged();
6605            } else {
6606              reqInfoBuilder_.setMessage(builderForValue.build());
6607            }
6608            bitField0_ |= 0x00000001;
6609            return this;
6610          }
6611          /**
6612           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
6613           */
6614          public Builder mergeReqInfo(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto value) {
6615            if (reqInfoBuilder_ == null) {
6616              if (((bitField0_ & 0x00000001) == 0x00000001) &&
6617                  reqInfo_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance()) {
6618                reqInfo_ =
6619                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.newBuilder(reqInfo_).mergeFrom(value).buildPartial();
6620              } else {
6621                reqInfo_ = value;
6622              }
6623              onChanged();
6624            } else {
6625              reqInfoBuilder_.mergeFrom(value);
6626            }
6627            bitField0_ |= 0x00000001;
6628            return this;
6629          }
6630          /**
6631           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
6632           */
6633          public Builder clearReqInfo() {
6634            if (reqInfoBuilder_ == null) {
6635              reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
6636              onChanged();
6637            } else {
6638              reqInfoBuilder_.clear();
6639            }
6640            bitField0_ = (bitField0_ & ~0x00000001);
6641            return this;
6642          }
6643          /**
6644           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
6645           */
6646          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder getReqInfoBuilder() {
6647            bitField0_ |= 0x00000001;
6648            onChanged();
6649            return getReqInfoFieldBuilder().getBuilder();
6650          }
6651          /**
6652           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
6653           */
6654          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder() {
6655            if (reqInfoBuilder_ != null) {
6656              return reqInfoBuilder_.getMessageOrBuilder();
6657            } else {
6658              return reqInfo_;
6659            }
6660          }
6661          /**
6662           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
6663           */
6664          private com.google.protobuf.SingleFieldBuilder<
6665              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder> 
6666              getReqInfoFieldBuilder() {
6667            if (reqInfoBuilder_ == null) {
6668              reqInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder<
6669                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder>(
6670                      reqInfo_,
6671                      getParentForChildren(),
6672                      isClean());
6673              reqInfo_ = null;
6674            }
6675            return reqInfoBuilder_;
6676          }
6677    
6678          // required uint64 startTxId = 2;
6679          private long startTxId_ ;
6680          /**
6681           * <code>required uint64 startTxId = 2;</code>
6682           */
6683          public boolean hasStartTxId() {
6684            return ((bitField0_ & 0x00000002) == 0x00000002);
6685          }
6686          /**
6687           * <code>required uint64 startTxId = 2;</code>
6688           */
6689          public long getStartTxId() {
6690            return startTxId_;
6691          }
6692          /**
6693           * <code>required uint64 startTxId = 2;</code>
6694           */
6695          public Builder setStartTxId(long value) {
6696            bitField0_ |= 0x00000002;
6697            startTxId_ = value;
6698            onChanged();
6699            return this;
6700          }
6701          /**
6702           * <code>required uint64 startTxId = 2;</code>
6703           */
6704          public Builder clearStartTxId() {
6705            bitField0_ = (bitField0_ & ~0x00000002);
6706            startTxId_ = 0L;
6707            onChanged();
6708            return this;
6709          }
6710    
6711          // required uint64 endTxId = 3;
6712          private long endTxId_ ;
6713          /**
6714           * <code>required uint64 endTxId = 3;</code>
6715           */
6716          public boolean hasEndTxId() {
6717            return ((bitField0_ & 0x00000004) == 0x00000004);
6718          }
6719          /**
6720           * <code>required uint64 endTxId = 3;</code>
6721           */
6722          public long getEndTxId() {
6723            return endTxId_;
6724          }
6725          /**
6726           * <code>required uint64 endTxId = 3;</code>
6727           */
6728          public Builder setEndTxId(long value) {
6729            bitField0_ |= 0x00000004;
6730            endTxId_ = value;
6731            onChanged();
6732            return this;
6733          }
6734          /**
6735           * <code>required uint64 endTxId = 3;</code>
6736           */
6737          public Builder clearEndTxId() {
6738            bitField0_ = (bitField0_ & ~0x00000004);
6739            endTxId_ = 0L;
6740            onChanged();
6741            return this;
6742          }
6743    
6744          // @@protoc_insertion_point(builder_scope:hadoop.hdfs.FinalizeLogSegmentRequestProto)
6745        }
6746    
6747        static {
6748          defaultInstance = new FinalizeLogSegmentRequestProto(true);
6749          defaultInstance.initFields();
6750        }
6751    
6752        // @@protoc_insertion_point(class_scope:hadoop.hdfs.FinalizeLogSegmentRequestProto)
6753      }
6754    
6755      public interface FinalizeLogSegmentResponseProtoOrBuilder
6756          extends com.google.protobuf.MessageOrBuilder {
6757      }
6758      /**
6759       * Protobuf type {@code hadoop.hdfs.FinalizeLogSegmentResponseProto}
6760       */
6761      public static final class FinalizeLogSegmentResponseProto extends
6762          com.google.protobuf.GeneratedMessage
6763          implements FinalizeLogSegmentResponseProtoOrBuilder {
6764        // Use FinalizeLogSegmentResponseProto.newBuilder() to construct.
6765        private FinalizeLogSegmentResponseProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
6766          super(builder);
6767          this.unknownFields = builder.getUnknownFields();
6768        }
6769        private FinalizeLogSegmentResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
6770    
6771        private static final FinalizeLogSegmentResponseProto defaultInstance;
6772        public static FinalizeLogSegmentResponseProto getDefaultInstance() {
6773          return defaultInstance;
6774        }
6775    
6776        public FinalizeLogSegmentResponseProto getDefaultInstanceForType() {
6777          return defaultInstance;
6778        }
6779    
6780        private final com.google.protobuf.UnknownFieldSet unknownFields;
6781        @java.lang.Override
6782        public final com.google.protobuf.UnknownFieldSet
6783            getUnknownFields() {
6784          return this.unknownFields;
6785        }
6786        private FinalizeLogSegmentResponseProto(
6787            com.google.protobuf.CodedInputStream input,
6788            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
6789            throws com.google.protobuf.InvalidProtocolBufferException {
6790          initFields();
6791          com.google.protobuf.UnknownFieldSet.Builder unknownFields =
6792              com.google.protobuf.UnknownFieldSet.newBuilder();
6793          try {
6794            boolean done = false;
6795            while (!done) {
6796              int tag = input.readTag();
6797              switch (tag) {
6798                case 0:
6799                  done = true;
6800                  break;
6801                default: {
6802                  if (!parseUnknownField(input, unknownFields,
6803                                         extensionRegistry, tag)) {
6804                    done = true;
6805                  }
6806                  break;
6807                }
6808              }
6809            }
6810          } catch (com.google.protobuf.InvalidProtocolBufferException e) {
6811            throw e.setUnfinishedMessage(this);
6812          } catch (java.io.IOException e) {
6813            throw new com.google.protobuf.InvalidProtocolBufferException(
6814                e.getMessage()).setUnfinishedMessage(this);
6815          } finally {
6816            this.unknownFields = unknownFields.build();
6817            makeExtensionsImmutable();
6818          }
6819        }
6820        public static final com.google.protobuf.Descriptors.Descriptor
6821            getDescriptor() {
6822          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_FinalizeLogSegmentResponseProto_descriptor;
6823        }
6824    
6825        protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
6826            internalGetFieldAccessorTable() {
6827          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_FinalizeLogSegmentResponseProto_fieldAccessorTable
6828              .ensureFieldAccessorsInitialized(
6829                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto.Builder.class);
6830        }
6831    
6832        public static com.google.protobuf.Parser<FinalizeLogSegmentResponseProto> PARSER =
6833            new com.google.protobuf.AbstractParser<FinalizeLogSegmentResponseProto>() {
6834          public FinalizeLogSegmentResponseProto parsePartialFrom(
6835              com.google.protobuf.CodedInputStream input,
6836              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
6837              throws com.google.protobuf.InvalidProtocolBufferException {
6838            return new FinalizeLogSegmentResponseProto(input, extensionRegistry);
6839          }
6840        };
6841    
6842        @java.lang.Override
6843        public com.google.protobuf.Parser<FinalizeLogSegmentResponseProto> getParserForType() {
6844          return PARSER;
6845        }
6846    
6847        private void initFields() {
6848        }
6849        private byte memoizedIsInitialized = -1;
6850        public final boolean isInitialized() {
6851          byte isInitialized = memoizedIsInitialized;
6852          if (isInitialized != -1) return isInitialized == 1;
6853    
6854          memoizedIsInitialized = 1;
6855          return true;
6856        }
6857    
6858        public void writeTo(com.google.protobuf.CodedOutputStream output)
6859                            throws java.io.IOException {
6860          getSerializedSize();
6861          getUnknownFields().writeTo(output);
6862        }
6863    
6864        private int memoizedSerializedSize = -1;
6865        public int getSerializedSize() {
6866          int size = memoizedSerializedSize;
6867          if (size != -1) return size;
6868    
6869          size = 0;
6870          size += getUnknownFields().getSerializedSize();
6871          memoizedSerializedSize = size;
6872          return size;
6873        }
6874    
6875        private static final long serialVersionUID = 0L;
6876        @java.lang.Override
6877        protected java.lang.Object writeReplace()
6878            throws java.io.ObjectStreamException {
6879          return super.writeReplace();
6880        }
6881    
6882        @java.lang.Override
6883        public boolean equals(final java.lang.Object obj) {
6884          if (obj == this) {
6885           return true;
6886          }
6887          if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto)) {
6888            return super.equals(obj);
6889          }
6890          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto) obj;
6891    
6892          boolean result = true;
6893          result = result &&
6894              getUnknownFields().equals(other.getUnknownFields());
6895          return result;
6896        }
6897    
6898        private int memoizedHashCode = 0;
6899        @java.lang.Override
6900        public int hashCode() {
6901          if (memoizedHashCode != 0) {
6902            return memoizedHashCode;
6903          }
6904          int hash = 41;
6905          hash = (19 * hash) + getDescriptorForType().hashCode();
6906          hash = (29 * hash) + getUnknownFields().hashCode();
6907          memoizedHashCode = hash;
6908          return hash;
6909        }
6910    
6911        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto parseFrom(
6912            com.google.protobuf.ByteString data)
6913            throws com.google.protobuf.InvalidProtocolBufferException {
6914          return PARSER.parseFrom(data);
6915        }
6916        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto parseFrom(
6917            com.google.protobuf.ByteString data,
6918            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
6919            throws com.google.protobuf.InvalidProtocolBufferException {
6920          return PARSER.parseFrom(data, extensionRegistry);
6921        }
6922        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto parseFrom(byte[] data)
6923            throws com.google.protobuf.InvalidProtocolBufferException {
6924          return PARSER.parseFrom(data);
6925        }
6926        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto parseFrom(
6927            byte[] data,
6928            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
6929            throws com.google.protobuf.InvalidProtocolBufferException {
6930          return PARSER.parseFrom(data, extensionRegistry);
6931        }
6932        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto parseFrom(java.io.InputStream input)
6933            throws java.io.IOException {
6934          return PARSER.parseFrom(input);
6935        }
6936        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto parseFrom(
6937            java.io.InputStream input,
6938            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
6939            throws java.io.IOException {
6940          return PARSER.parseFrom(input, extensionRegistry);
6941        }
6942        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto parseDelimitedFrom(java.io.InputStream input)
6943            throws java.io.IOException {
6944          return PARSER.parseDelimitedFrom(input);
6945        }
6946        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto parseDelimitedFrom(
6947            java.io.InputStream input,
6948            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
6949            throws java.io.IOException {
6950          return PARSER.parseDelimitedFrom(input, extensionRegistry);
6951        }
6952        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto parseFrom(
6953            com.google.protobuf.CodedInputStream input)
6954            throws java.io.IOException {
6955          return PARSER.parseFrom(input);
6956        }
6957        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto parseFrom(
6958            com.google.protobuf.CodedInputStream input,
6959            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
6960            throws java.io.IOException {
6961          return PARSER.parseFrom(input, extensionRegistry);
6962        }
6963    
6964        public static Builder newBuilder() { return Builder.create(); }
6965        public Builder newBuilderForType() { return newBuilder(); }
6966        public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto prototype) {
6967          return newBuilder().mergeFrom(prototype);
6968        }
6969        public Builder toBuilder() { return newBuilder(this); }
6970    
6971        @java.lang.Override
6972        protected Builder newBuilderForType(
6973            com.google.protobuf.GeneratedMessage.BuilderParent parent) {
6974          Builder builder = new Builder(parent);
6975          return builder;
6976        }
6977        /**
6978         * Protobuf type {@code hadoop.hdfs.FinalizeLogSegmentResponseProto}
6979         */
6980        public static final class Builder extends
6981            com.google.protobuf.GeneratedMessage.Builder<Builder>
6982           implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProtoOrBuilder {
6983          public static final com.google.protobuf.Descriptors.Descriptor
6984              getDescriptor() {
6985            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_FinalizeLogSegmentResponseProto_descriptor;
6986          }
6987    
6988          protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
6989              internalGetFieldAccessorTable() {
6990            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_FinalizeLogSegmentResponseProto_fieldAccessorTable
6991                .ensureFieldAccessorsInitialized(
6992                    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto.Builder.class);
6993          }
6994    
6995          // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto.newBuilder()
6996          private Builder() {
6997            maybeForceBuilderInitialization();
6998          }
6999    
7000          private Builder(
7001              com.google.protobuf.GeneratedMessage.BuilderParent parent) {
7002            super(parent);
7003            maybeForceBuilderInitialization();
7004          }
7005          private void maybeForceBuilderInitialization() {
7006            if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
7007            }
7008          }
7009          private static Builder create() {
7010            return new Builder();
7011          }
7012    
7013          public Builder clear() {
7014            super.clear();
7015            return this;
7016          }
7017    
7018          public Builder clone() {
7019            return create().mergeFrom(buildPartial());
7020          }
7021    
7022          public com.google.protobuf.Descriptors.Descriptor
7023              getDescriptorForType() {
7024            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_FinalizeLogSegmentResponseProto_descriptor;
7025          }
7026    
7027          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto getDefaultInstanceForType() {
7028            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto.getDefaultInstance();
7029          }
7030    
7031          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto build() {
7032            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto result = buildPartial();
7033            if (!result.isInitialized()) {
7034              throw newUninitializedMessageException(result);
7035            }
7036            return result;
7037          }
7038    
7039          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto buildPartial() {
7040            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto(this);
7041            onBuilt();
7042            return result;
7043          }
7044    
7045          public Builder mergeFrom(com.google.protobuf.Message other) {
7046            if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto) {
7047              return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto)other);
7048            } else {
7049              super.mergeFrom(other);
7050              return this;
7051            }
7052          }
7053    
7054          public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto other) {
7055            if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto.getDefaultInstance()) return this;
7056            this.mergeUnknownFields(other.getUnknownFields());
7057            return this;
7058          }
7059    
7060          public final boolean isInitialized() {
7061            return true;
7062          }
7063    
7064          public Builder mergeFrom(
7065              com.google.protobuf.CodedInputStream input,
7066              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
7067              throws java.io.IOException {
7068            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto parsedMessage = null;
7069            try {
7070              parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
7071            } catch (com.google.protobuf.InvalidProtocolBufferException e) {
7072              parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto) e.getUnfinishedMessage();
7073              throw e;
7074            } finally {
7075              if (parsedMessage != null) {
7076                mergeFrom(parsedMessage);
7077              }
7078            }
7079            return this;
7080          }
7081    
7082          // @@protoc_insertion_point(builder_scope:hadoop.hdfs.FinalizeLogSegmentResponseProto)
7083        }
7084    
7085        static {
7086          defaultInstance = new FinalizeLogSegmentResponseProto(true);
7087          defaultInstance.initFields();
7088        }
7089    
7090        // @@protoc_insertion_point(class_scope:hadoop.hdfs.FinalizeLogSegmentResponseProto)
7091      }
7092    
7093      public interface PurgeLogsRequestProtoOrBuilder
7094          extends com.google.protobuf.MessageOrBuilder {
7095    
7096        // required .hadoop.hdfs.RequestInfoProto reqInfo = 1;
7097        /**
7098         * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
7099         */
7100        boolean hasReqInfo();
7101        /**
7102         * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
7103         */
7104        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo();
7105        /**
7106         * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
7107         */
7108        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder();
7109    
7110        // required uint64 minTxIdToKeep = 2;
7111        /**
7112         * <code>required uint64 minTxIdToKeep = 2;</code>
7113         */
7114        boolean hasMinTxIdToKeep();
7115        /**
7116         * <code>required uint64 minTxIdToKeep = 2;</code>
7117         */
7118        long getMinTxIdToKeep();
7119      }
7120      /**
7121       * Protobuf type {@code hadoop.hdfs.PurgeLogsRequestProto}
7122       *
7123       * <pre>
7124       **
7125       * purgeLogs()
7126       * </pre>
7127       */
7128      public static final class PurgeLogsRequestProto extends
7129          com.google.protobuf.GeneratedMessage
7130          implements PurgeLogsRequestProtoOrBuilder {
7131        // Use PurgeLogsRequestProto.newBuilder() to construct.
7132        private PurgeLogsRequestProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
7133          super(builder);
7134          this.unknownFields = builder.getUnknownFields();
7135        }
7136        private PurgeLogsRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
7137    
7138        private static final PurgeLogsRequestProto defaultInstance;
7139        public static PurgeLogsRequestProto getDefaultInstance() {
7140          return defaultInstance;
7141        }
7142    
7143        public PurgeLogsRequestProto getDefaultInstanceForType() {
7144          return defaultInstance;
7145        }
7146    
7147        private final com.google.protobuf.UnknownFieldSet unknownFields;
7148        @java.lang.Override
7149        public final com.google.protobuf.UnknownFieldSet
7150            getUnknownFields() {
7151          return this.unknownFields;
7152        }
7153        private PurgeLogsRequestProto(
7154            com.google.protobuf.CodedInputStream input,
7155            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
7156            throws com.google.protobuf.InvalidProtocolBufferException {
7157          initFields();
7158          int mutable_bitField0_ = 0;
7159          com.google.protobuf.UnknownFieldSet.Builder unknownFields =
7160              com.google.protobuf.UnknownFieldSet.newBuilder();
7161          try {
7162            boolean done = false;
7163            while (!done) {
7164              int tag = input.readTag();
7165              switch (tag) {
7166                case 0:
7167                  done = true;
7168                  break;
7169                default: {
7170                  if (!parseUnknownField(input, unknownFields,
7171                                         extensionRegistry, tag)) {
7172                    done = true;
7173                  }
7174                  break;
7175                }
7176                case 10: {
7177                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder subBuilder = null;
7178                  if (((bitField0_ & 0x00000001) == 0x00000001)) {
7179                    subBuilder = reqInfo_.toBuilder();
7180                  }
7181                  reqInfo_ = input.readMessage(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.PARSER, extensionRegistry);
7182                  if (subBuilder != null) {
7183                    subBuilder.mergeFrom(reqInfo_);
7184                    reqInfo_ = subBuilder.buildPartial();
7185                  }
7186                  bitField0_ |= 0x00000001;
7187                  break;
7188                }
7189                case 16: {
7190                  bitField0_ |= 0x00000002;
7191                  minTxIdToKeep_ = input.readUInt64();
7192                  break;
7193                }
7194              }
7195            }
7196          } catch (com.google.protobuf.InvalidProtocolBufferException e) {
7197            throw e.setUnfinishedMessage(this);
7198          } catch (java.io.IOException e) {
7199            throw new com.google.protobuf.InvalidProtocolBufferException(
7200                e.getMessage()).setUnfinishedMessage(this);
7201          } finally {
7202            this.unknownFields = unknownFields.build();
7203            makeExtensionsImmutable();
7204          }
7205        }
7206        public static final com.google.protobuf.Descriptors.Descriptor
7207            getDescriptor() {
7208          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_PurgeLogsRequestProto_descriptor;
7209        }
7210    
7211        protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
7212            internalGetFieldAccessorTable() {
7213          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_PurgeLogsRequestProto_fieldAccessorTable
7214              .ensureFieldAccessorsInitialized(
7215                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto.Builder.class);
7216        }
7217    
7218        public static com.google.protobuf.Parser<PurgeLogsRequestProto> PARSER =
7219            new com.google.protobuf.AbstractParser<PurgeLogsRequestProto>() {
7220          public PurgeLogsRequestProto parsePartialFrom(
7221              com.google.protobuf.CodedInputStream input,
7222              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
7223              throws com.google.protobuf.InvalidProtocolBufferException {
7224            return new PurgeLogsRequestProto(input, extensionRegistry);
7225          }
7226        };
7227    
7228        @java.lang.Override
7229        public com.google.protobuf.Parser<PurgeLogsRequestProto> getParserForType() {
7230          return PARSER;
7231        }
7232    
7233        private int bitField0_;
7234        // required .hadoop.hdfs.RequestInfoProto reqInfo = 1;
7235        public static final int REQINFO_FIELD_NUMBER = 1;
7236        private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto reqInfo_;
7237        /**
7238         * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
7239         */
7240        public boolean hasReqInfo() {
7241          return ((bitField0_ & 0x00000001) == 0x00000001);
7242        }
7243        /**
7244         * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
7245         */
7246        public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo() {
7247          return reqInfo_;
7248        }
7249        /**
7250         * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
7251         */
7252        public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder() {
7253          return reqInfo_;
7254        }
7255    
7256        // required uint64 minTxIdToKeep = 2;
7257        public static final int MINTXIDTOKEEP_FIELD_NUMBER = 2;
7258        private long minTxIdToKeep_;
7259        /**
7260         * <code>required uint64 minTxIdToKeep = 2;</code>
7261         */
7262        public boolean hasMinTxIdToKeep() {
7263          return ((bitField0_ & 0x00000002) == 0x00000002);
7264        }
7265        /**
7266         * <code>required uint64 minTxIdToKeep = 2;</code>
7267         */
7268        public long getMinTxIdToKeep() {
7269          return minTxIdToKeep_;
7270        }
7271    
7272        private void initFields() {
7273          reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
7274          minTxIdToKeep_ = 0L;
7275        }
7276        private byte memoizedIsInitialized = -1;
7277        public final boolean isInitialized() {
7278          byte isInitialized = memoizedIsInitialized;
7279          if (isInitialized != -1) return isInitialized == 1;
7280    
7281          if (!hasReqInfo()) {
7282            memoizedIsInitialized = 0;
7283            return false;
7284          }
7285          if (!hasMinTxIdToKeep()) {
7286            memoizedIsInitialized = 0;
7287            return false;
7288          }
7289          if (!getReqInfo().isInitialized()) {
7290            memoizedIsInitialized = 0;
7291            return false;
7292          }
7293          memoizedIsInitialized = 1;
7294          return true;
7295        }
7296    
7297        public void writeTo(com.google.protobuf.CodedOutputStream output)
7298                            throws java.io.IOException {
7299          getSerializedSize();
7300          if (((bitField0_ & 0x00000001) == 0x00000001)) {
7301            output.writeMessage(1, reqInfo_);
7302          }
7303          if (((bitField0_ & 0x00000002) == 0x00000002)) {
7304            output.writeUInt64(2, minTxIdToKeep_);
7305          }
7306          getUnknownFields().writeTo(output);
7307        }
7308    
7309        private int memoizedSerializedSize = -1;
7310        public int getSerializedSize() {
7311          int size = memoizedSerializedSize;
7312          if (size != -1) return size;
7313    
7314          size = 0;
7315          if (((bitField0_ & 0x00000001) == 0x00000001)) {
7316            size += com.google.protobuf.CodedOutputStream
7317              .computeMessageSize(1, reqInfo_);
7318          }
7319          if (((bitField0_ & 0x00000002) == 0x00000002)) {
7320            size += com.google.protobuf.CodedOutputStream
7321              .computeUInt64Size(2, minTxIdToKeep_);
7322          }
7323          size += getUnknownFields().getSerializedSize();
7324          memoizedSerializedSize = size;
7325          return size;
7326        }
7327    
7328        private static final long serialVersionUID = 0L;
7329        @java.lang.Override
7330        protected java.lang.Object writeReplace()
7331            throws java.io.ObjectStreamException {
7332          return super.writeReplace();
7333        }
7334    
7335        @java.lang.Override
7336        public boolean equals(final java.lang.Object obj) {
7337          if (obj == this) {
7338           return true;
7339          }
7340          if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto)) {
7341            return super.equals(obj);
7342          }
7343          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto) obj;
7344    
7345          boolean result = true;
7346          result = result && (hasReqInfo() == other.hasReqInfo());
7347          if (hasReqInfo()) {
7348            result = result && getReqInfo()
7349                .equals(other.getReqInfo());
7350          }
7351          result = result && (hasMinTxIdToKeep() == other.hasMinTxIdToKeep());
7352          if (hasMinTxIdToKeep()) {
7353            result = result && (getMinTxIdToKeep()
7354                == other.getMinTxIdToKeep());
7355          }
7356          result = result &&
7357              getUnknownFields().equals(other.getUnknownFields());
7358          return result;
7359        }
7360    
7361        private int memoizedHashCode = 0;
7362        @java.lang.Override
7363        public int hashCode() {
7364          if (memoizedHashCode != 0) {
7365            return memoizedHashCode;
7366          }
7367          int hash = 41;
7368          hash = (19 * hash) + getDescriptorForType().hashCode();
7369          if (hasReqInfo()) {
7370            hash = (37 * hash) + REQINFO_FIELD_NUMBER;
7371            hash = (53 * hash) + getReqInfo().hashCode();
7372          }
7373          if (hasMinTxIdToKeep()) {
7374            hash = (37 * hash) + MINTXIDTOKEEP_FIELD_NUMBER;
7375            hash = (53 * hash) + hashLong(getMinTxIdToKeep());
7376          }
7377          hash = (29 * hash) + getUnknownFields().hashCode();
7378          memoizedHashCode = hash;
7379          return hash;
7380        }
7381    
7382        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto parseFrom(
7383            com.google.protobuf.ByteString data)
7384            throws com.google.protobuf.InvalidProtocolBufferException {
7385          return PARSER.parseFrom(data);
7386        }
7387        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto parseFrom(
7388            com.google.protobuf.ByteString data,
7389            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
7390            throws com.google.protobuf.InvalidProtocolBufferException {
7391          return PARSER.parseFrom(data, extensionRegistry);
7392        }
7393        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto parseFrom(byte[] data)
7394            throws com.google.protobuf.InvalidProtocolBufferException {
7395          return PARSER.parseFrom(data);
7396        }
7397        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto parseFrom(
7398            byte[] data,
7399            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
7400            throws com.google.protobuf.InvalidProtocolBufferException {
7401          return PARSER.parseFrom(data, extensionRegistry);
7402        }
7403        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto parseFrom(java.io.InputStream input)
7404            throws java.io.IOException {
7405          return PARSER.parseFrom(input);
7406        }
7407        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto parseFrom(
7408            java.io.InputStream input,
7409            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
7410            throws java.io.IOException {
7411          return PARSER.parseFrom(input, extensionRegistry);
7412        }
7413        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto parseDelimitedFrom(java.io.InputStream input)
7414            throws java.io.IOException {
7415          return PARSER.parseDelimitedFrom(input);
7416        }
7417        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto parseDelimitedFrom(
7418            java.io.InputStream input,
7419            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
7420            throws java.io.IOException {
7421          return PARSER.parseDelimitedFrom(input, extensionRegistry);
7422        }
7423        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto parseFrom(
7424            com.google.protobuf.CodedInputStream input)
7425            throws java.io.IOException {
7426          return PARSER.parseFrom(input);
7427        }
7428        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto parseFrom(
7429            com.google.protobuf.CodedInputStream input,
7430            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
7431            throws java.io.IOException {
7432          return PARSER.parseFrom(input, extensionRegistry);
7433        }
7434    
7435        public static Builder newBuilder() { return Builder.create(); }
7436        public Builder newBuilderForType() { return newBuilder(); }
7437        public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto prototype) {
7438          return newBuilder().mergeFrom(prototype);
7439        }
7440        public Builder toBuilder() { return newBuilder(this); }
7441    
7442        @java.lang.Override
7443        protected Builder newBuilderForType(
7444            com.google.protobuf.GeneratedMessage.BuilderParent parent) {
7445          Builder builder = new Builder(parent);
7446          return builder;
7447        }
7448        /**
7449         * Protobuf type {@code hadoop.hdfs.PurgeLogsRequestProto}
7450         *
7451         * <pre>
7452         **
7453         * purgeLogs()
7454         * </pre>
7455         */
7456        public static final class Builder extends
7457            com.google.protobuf.GeneratedMessage.Builder<Builder>
7458           implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProtoOrBuilder {
7459          public static final com.google.protobuf.Descriptors.Descriptor
7460              getDescriptor() {
7461            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_PurgeLogsRequestProto_descriptor;
7462          }
7463    
7464          protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
7465              internalGetFieldAccessorTable() {
7466            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_PurgeLogsRequestProto_fieldAccessorTable
7467                .ensureFieldAccessorsInitialized(
7468                    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto.Builder.class);
7469          }
7470    
7471          // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto.newBuilder()
7472          private Builder() {
7473            maybeForceBuilderInitialization();
7474          }
7475    
7476          private Builder(
7477              com.google.protobuf.GeneratedMessage.BuilderParent parent) {
7478            super(parent);
7479            maybeForceBuilderInitialization();
7480          }
7481          private void maybeForceBuilderInitialization() {
7482            if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
7483              getReqInfoFieldBuilder();
7484            }
7485          }
7486          private static Builder create() {
7487            return new Builder();
7488          }
7489    
7490          public Builder clear() {
7491            super.clear();
7492            if (reqInfoBuilder_ == null) {
7493              reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
7494            } else {
7495              reqInfoBuilder_.clear();
7496            }
7497            bitField0_ = (bitField0_ & ~0x00000001);
7498            minTxIdToKeep_ = 0L;
7499            bitField0_ = (bitField0_ & ~0x00000002);
7500            return this;
7501          }
7502    
7503          public Builder clone() {
7504            return create().mergeFrom(buildPartial());
7505          }
7506    
7507          public com.google.protobuf.Descriptors.Descriptor
7508              getDescriptorForType() {
7509            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_PurgeLogsRequestProto_descriptor;
7510          }
7511    
7512          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto getDefaultInstanceForType() {
7513            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto.getDefaultInstance();
7514          }
7515    
7516          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto build() {
7517            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto result = buildPartial();
7518            if (!result.isInitialized()) {
7519              throw newUninitializedMessageException(result);
7520            }
7521            return result;
7522          }
7523    
7524          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto buildPartial() {
7525            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto(this);
7526            int from_bitField0_ = bitField0_;
7527            int to_bitField0_ = 0;
7528            if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
7529              to_bitField0_ |= 0x00000001;
7530            }
7531            if (reqInfoBuilder_ == null) {
7532              result.reqInfo_ = reqInfo_;
7533            } else {
7534              result.reqInfo_ = reqInfoBuilder_.build();
7535            }
7536            if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
7537              to_bitField0_ |= 0x00000002;
7538            }
7539            result.minTxIdToKeep_ = minTxIdToKeep_;
7540            result.bitField0_ = to_bitField0_;
7541            onBuilt();
7542            return result;
7543          }
7544    
7545          public Builder mergeFrom(com.google.protobuf.Message other) {
7546            if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto) {
7547              return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto)other);
7548            } else {
7549              super.mergeFrom(other);
7550              return this;
7551            }
7552          }
7553    
7554          public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto other) {
7555            if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto.getDefaultInstance()) return this;
7556            if (other.hasReqInfo()) {
7557              mergeReqInfo(other.getReqInfo());
7558            }
7559            if (other.hasMinTxIdToKeep()) {
7560              setMinTxIdToKeep(other.getMinTxIdToKeep());
7561            }
7562            this.mergeUnknownFields(other.getUnknownFields());
7563            return this;
7564          }
7565    
7566          public final boolean isInitialized() {
7567            if (!hasReqInfo()) {
7568              
7569              return false;
7570            }
7571            if (!hasMinTxIdToKeep()) {
7572              
7573              return false;
7574            }
7575            if (!getReqInfo().isInitialized()) {
7576              
7577              return false;
7578            }
7579            return true;
7580          }
7581    
7582          public Builder mergeFrom(
7583              com.google.protobuf.CodedInputStream input,
7584              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
7585              throws java.io.IOException {
7586            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto parsedMessage = null;
7587            try {
7588              parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
7589            } catch (com.google.protobuf.InvalidProtocolBufferException e) {
7590              parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto) e.getUnfinishedMessage();
7591              throw e;
7592            } finally {
7593              if (parsedMessage != null) {
7594                mergeFrom(parsedMessage);
7595              }
7596            }
7597            return this;
7598          }
7599          private int bitField0_;
7600    
7601          // required .hadoop.hdfs.RequestInfoProto reqInfo = 1;
7602          private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
7603          private com.google.protobuf.SingleFieldBuilder<
7604              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder> reqInfoBuilder_;
7605          /**
7606           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
7607           */
7608          public boolean hasReqInfo() {
7609            return ((bitField0_ & 0x00000001) == 0x00000001);
7610          }
7611          /**
7612           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
7613           */
7614          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo() {
7615            if (reqInfoBuilder_ == null) {
7616              return reqInfo_;
7617            } else {
7618              return reqInfoBuilder_.getMessage();
7619            }
7620          }
7621          /**
7622           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
7623           */
7624          public Builder setReqInfo(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto value) {
7625            if (reqInfoBuilder_ == null) {
7626              if (value == null) {
7627                throw new NullPointerException();
7628              }
7629              reqInfo_ = value;
7630              onChanged();
7631            } else {
7632              reqInfoBuilder_.setMessage(value);
7633            }
7634            bitField0_ |= 0x00000001;
7635            return this;
7636          }
7637          /**
7638           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
7639           */
7640          public Builder setReqInfo(
7641              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder builderForValue) {
7642            if (reqInfoBuilder_ == null) {
7643              reqInfo_ = builderForValue.build();
7644              onChanged();
7645            } else {
7646              reqInfoBuilder_.setMessage(builderForValue.build());
7647            }
7648            bitField0_ |= 0x00000001;
7649            return this;
7650          }
7651          /**
7652           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
7653           */
7654          public Builder mergeReqInfo(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto value) {
7655            if (reqInfoBuilder_ == null) {
7656              if (((bitField0_ & 0x00000001) == 0x00000001) &&
7657                  reqInfo_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance()) {
7658                reqInfo_ =
7659                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.newBuilder(reqInfo_).mergeFrom(value).buildPartial();
7660              } else {
7661                reqInfo_ = value;
7662              }
7663              onChanged();
7664            } else {
7665              reqInfoBuilder_.mergeFrom(value);
7666            }
7667            bitField0_ |= 0x00000001;
7668            return this;
7669          }
7670          /**
7671           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
7672           */
7673          public Builder clearReqInfo() {
7674            if (reqInfoBuilder_ == null) {
7675              reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
7676              onChanged();
7677            } else {
7678              reqInfoBuilder_.clear();
7679            }
7680            bitField0_ = (bitField0_ & ~0x00000001);
7681            return this;
7682          }
7683          /**
7684           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
7685           */
7686          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder getReqInfoBuilder() {
7687            bitField0_ |= 0x00000001;
7688            onChanged();
7689            return getReqInfoFieldBuilder().getBuilder();
7690          }
7691          /**
7692           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
7693           */
7694          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder() {
7695            if (reqInfoBuilder_ != null) {
7696              return reqInfoBuilder_.getMessageOrBuilder();
7697            } else {
7698              return reqInfo_;
7699            }
7700          }
7701          /**
7702           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
7703           */
7704          private com.google.protobuf.SingleFieldBuilder<
7705              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder> 
7706              getReqInfoFieldBuilder() {
7707            if (reqInfoBuilder_ == null) {
7708              reqInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder<
7709                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder>(
7710                      reqInfo_,
7711                      getParentForChildren(),
7712                      isClean());
7713              reqInfo_ = null;
7714            }
7715            return reqInfoBuilder_;
7716          }
7717    
7718          // required uint64 minTxIdToKeep = 2;
7719          private long minTxIdToKeep_ ;
7720          /**
7721           * <code>required uint64 minTxIdToKeep = 2;</code>
7722           */
7723          public boolean hasMinTxIdToKeep() {
7724            return ((bitField0_ & 0x00000002) == 0x00000002);
7725          }
7726          /**
7727           * <code>required uint64 minTxIdToKeep = 2;</code>
7728           */
7729          public long getMinTxIdToKeep() {
7730            return minTxIdToKeep_;
7731          }
7732          /**
7733           * <code>required uint64 minTxIdToKeep = 2;</code>
7734           */
7735          public Builder setMinTxIdToKeep(long value) {
7736            bitField0_ |= 0x00000002;
7737            minTxIdToKeep_ = value;
7738            onChanged();
7739            return this;
7740          }
7741          /**
7742           * <code>required uint64 minTxIdToKeep = 2;</code>
7743           */
7744          public Builder clearMinTxIdToKeep() {
7745            bitField0_ = (bitField0_ & ~0x00000002);
7746            minTxIdToKeep_ = 0L;
7747            onChanged();
7748            return this;
7749          }
7750    
7751          // @@protoc_insertion_point(builder_scope:hadoop.hdfs.PurgeLogsRequestProto)
7752        }
7753    
7754        static {
7755          defaultInstance = new PurgeLogsRequestProto(true);
7756          defaultInstance.initFields();
7757        }
7758    
7759        // @@protoc_insertion_point(class_scope:hadoop.hdfs.PurgeLogsRequestProto)
7760      }
7761    
7762      public interface PurgeLogsResponseProtoOrBuilder
7763          extends com.google.protobuf.MessageOrBuilder {
7764      }
7765      /**
7766       * Protobuf type {@code hadoop.hdfs.PurgeLogsResponseProto}
7767       */
7768      public static final class PurgeLogsResponseProto extends
7769          com.google.protobuf.GeneratedMessage
7770          implements PurgeLogsResponseProtoOrBuilder {
7771        // Use PurgeLogsResponseProto.newBuilder() to construct.
7772        private PurgeLogsResponseProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
7773          super(builder);
7774          this.unknownFields = builder.getUnknownFields();
7775        }
7776        private PurgeLogsResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
7777    
7778        private static final PurgeLogsResponseProto defaultInstance;
7779        public static PurgeLogsResponseProto getDefaultInstance() {
7780          return defaultInstance;
7781        }
7782    
7783        public PurgeLogsResponseProto getDefaultInstanceForType() {
7784          return defaultInstance;
7785        }
7786    
7787        private final com.google.protobuf.UnknownFieldSet unknownFields;
7788        @java.lang.Override
7789        public final com.google.protobuf.UnknownFieldSet
7790            getUnknownFields() {
7791          return this.unknownFields;
7792        }
7793        private PurgeLogsResponseProto(
7794            com.google.protobuf.CodedInputStream input,
7795            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
7796            throws com.google.protobuf.InvalidProtocolBufferException {
7797          initFields();
7798          com.google.protobuf.UnknownFieldSet.Builder unknownFields =
7799              com.google.protobuf.UnknownFieldSet.newBuilder();
7800          try {
7801            boolean done = false;
7802            while (!done) {
7803              int tag = input.readTag();
7804              switch (tag) {
7805                case 0:
7806                  done = true;
7807                  break;
7808                default: {
7809                  if (!parseUnknownField(input, unknownFields,
7810                                         extensionRegistry, tag)) {
7811                    done = true;
7812                  }
7813                  break;
7814                }
7815              }
7816            }
7817          } catch (com.google.protobuf.InvalidProtocolBufferException e) {
7818            throw e.setUnfinishedMessage(this);
7819          } catch (java.io.IOException e) {
7820            throw new com.google.protobuf.InvalidProtocolBufferException(
7821                e.getMessage()).setUnfinishedMessage(this);
7822          } finally {
7823            this.unknownFields = unknownFields.build();
7824            makeExtensionsImmutable();
7825          }
7826        }
7827        public static final com.google.protobuf.Descriptors.Descriptor
7828            getDescriptor() {
7829          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_PurgeLogsResponseProto_descriptor;
7830        }
7831    
7832        protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
7833            internalGetFieldAccessorTable() {
7834          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_PurgeLogsResponseProto_fieldAccessorTable
7835              .ensureFieldAccessorsInitialized(
7836                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto.Builder.class);
7837        }
7838    
7839        public static com.google.protobuf.Parser<PurgeLogsResponseProto> PARSER =
7840            new com.google.protobuf.AbstractParser<PurgeLogsResponseProto>() {
7841          public PurgeLogsResponseProto parsePartialFrom(
7842              com.google.protobuf.CodedInputStream input,
7843              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
7844              throws com.google.protobuf.InvalidProtocolBufferException {
7845            return new PurgeLogsResponseProto(input, extensionRegistry);
7846          }
7847        };
7848    
7849        @java.lang.Override
7850        public com.google.protobuf.Parser<PurgeLogsResponseProto> getParserForType() {
7851          return PARSER;
7852        }
7853    
7854        private void initFields() {
7855        }
7856        private byte memoizedIsInitialized = -1;
7857        public final boolean isInitialized() {
7858          byte isInitialized = memoizedIsInitialized;
7859          if (isInitialized != -1) return isInitialized == 1;
7860    
7861          memoizedIsInitialized = 1;
7862          return true;
7863        }
7864    
7865        public void writeTo(com.google.protobuf.CodedOutputStream output)
7866                            throws java.io.IOException {
7867          getSerializedSize();
7868          getUnknownFields().writeTo(output);
7869        }
7870    
7871        private int memoizedSerializedSize = -1;
7872        public int getSerializedSize() {
7873          int size = memoizedSerializedSize;
7874          if (size != -1) return size;
7875    
7876          size = 0;
7877          size += getUnknownFields().getSerializedSize();
7878          memoizedSerializedSize = size;
7879          return size;
7880        }
7881    
7882        private static final long serialVersionUID = 0L;
7883        @java.lang.Override
7884        protected java.lang.Object writeReplace()
7885            throws java.io.ObjectStreamException {
7886          return super.writeReplace();
7887        }
7888    
7889        @java.lang.Override
7890        public boolean equals(final java.lang.Object obj) {
7891          if (obj == this) {
7892           return true;
7893          }
7894          if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto)) {
7895            return super.equals(obj);
7896          }
7897          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto) obj;
7898    
7899          boolean result = true;
7900          result = result &&
7901              getUnknownFields().equals(other.getUnknownFields());
7902          return result;
7903        }
7904    
7905        private int memoizedHashCode = 0;
7906        @java.lang.Override
7907        public int hashCode() {
7908          if (memoizedHashCode != 0) {
7909            return memoizedHashCode;
7910          }
7911          int hash = 41;
7912          hash = (19 * hash) + getDescriptorForType().hashCode();
7913          hash = (29 * hash) + getUnknownFields().hashCode();
7914          memoizedHashCode = hash;
7915          return hash;
7916        }
7917    
7918        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto parseFrom(
7919            com.google.protobuf.ByteString data)
7920            throws com.google.protobuf.InvalidProtocolBufferException {
7921          return PARSER.parseFrom(data);
7922        }
7923        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto parseFrom(
7924            com.google.protobuf.ByteString data,
7925            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
7926            throws com.google.protobuf.InvalidProtocolBufferException {
7927          return PARSER.parseFrom(data, extensionRegistry);
7928        }
7929        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto parseFrom(byte[] data)
7930            throws com.google.protobuf.InvalidProtocolBufferException {
7931          return PARSER.parseFrom(data);
7932        }
7933        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto parseFrom(
7934            byte[] data,
7935            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
7936            throws com.google.protobuf.InvalidProtocolBufferException {
7937          return PARSER.parseFrom(data, extensionRegistry);
7938        }
7939        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto parseFrom(java.io.InputStream input)
7940            throws java.io.IOException {
7941          return PARSER.parseFrom(input);
7942        }
7943        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto parseFrom(
7944            java.io.InputStream input,
7945            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
7946            throws java.io.IOException {
7947          return PARSER.parseFrom(input, extensionRegistry);
7948        }
7949        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto parseDelimitedFrom(java.io.InputStream input)
7950            throws java.io.IOException {
7951          return PARSER.parseDelimitedFrom(input);
7952        }
7953        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto parseDelimitedFrom(
7954            java.io.InputStream input,
7955            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
7956            throws java.io.IOException {
7957          return PARSER.parseDelimitedFrom(input, extensionRegistry);
7958        }
7959        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto parseFrom(
7960            com.google.protobuf.CodedInputStream input)
7961            throws java.io.IOException {
7962          return PARSER.parseFrom(input);
7963        }
7964        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto parseFrom(
7965            com.google.protobuf.CodedInputStream input,
7966            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
7967            throws java.io.IOException {
7968          return PARSER.parseFrom(input, extensionRegistry);
7969        }
7970    
7971        public static Builder newBuilder() { return Builder.create(); }
7972        public Builder newBuilderForType() { return newBuilder(); }
7973        public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto prototype) {
7974          return newBuilder().mergeFrom(prototype);
7975        }
7976        public Builder toBuilder() { return newBuilder(this); }
7977    
7978        @java.lang.Override
7979        protected Builder newBuilderForType(
7980            com.google.protobuf.GeneratedMessage.BuilderParent parent) {
7981          Builder builder = new Builder(parent);
7982          return builder;
7983        }
7984        /**
7985         * Protobuf type {@code hadoop.hdfs.PurgeLogsResponseProto}
7986         */
7987        public static final class Builder extends
7988            com.google.protobuf.GeneratedMessage.Builder<Builder>
7989           implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProtoOrBuilder {
7990          public static final com.google.protobuf.Descriptors.Descriptor
7991              getDescriptor() {
7992            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_PurgeLogsResponseProto_descriptor;
7993          }
7994    
7995          protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
7996              internalGetFieldAccessorTable() {
7997            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_PurgeLogsResponseProto_fieldAccessorTable
7998                .ensureFieldAccessorsInitialized(
7999                    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto.Builder.class);
8000          }
8001    
8002          // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto.newBuilder()
8003          private Builder() {
8004            maybeForceBuilderInitialization();
8005          }
8006    
8007          private Builder(
8008              com.google.protobuf.GeneratedMessage.BuilderParent parent) {
8009            super(parent);
8010            maybeForceBuilderInitialization();
8011          }
8012          private void maybeForceBuilderInitialization() {
8013            if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
8014            }
8015          }
8016          private static Builder create() {
8017            return new Builder();
8018          }
8019    
8020          public Builder clear() {
8021            super.clear();
8022            return this;
8023          }
8024    
8025          public Builder clone() {
8026            return create().mergeFrom(buildPartial());
8027          }
8028    
8029          public com.google.protobuf.Descriptors.Descriptor
8030              getDescriptorForType() {
8031            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_PurgeLogsResponseProto_descriptor;
8032          }
8033    
8034          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto getDefaultInstanceForType() {
8035            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto.getDefaultInstance();
8036          }
8037    
8038          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto build() {
8039            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto result = buildPartial();
8040            if (!result.isInitialized()) {
8041              throw newUninitializedMessageException(result);
8042            }
8043            return result;
8044          }
8045    
8046          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto buildPartial() {
8047            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto(this);
8048            onBuilt();
8049            return result;
8050          }
8051    
8052          public Builder mergeFrom(com.google.protobuf.Message other) {
8053            if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto) {
8054              return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto)other);
8055            } else {
8056              super.mergeFrom(other);
8057              return this;
8058            }
8059          }
8060    
8061          public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto other) {
8062            if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto.getDefaultInstance()) return this;
8063            this.mergeUnknownFields(other.getUnknownFields());
8064            return this;
8065          }
8066    
8067          public final boolean isInitialized() {
8068            return true;
8069          }
8070    
8071          public Builder mergeFrom(
8072              com.google.protobuf.CodedInputStream input,
8073              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
8074              throws java.io.IOException {
8075            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto parsedMessage = null;
8076            try {
8077              parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
8078            } catch (com.google.protobuf.InvalidProtocolBufferException e) {
8079              parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto) e.getUnfinishedMessage();
8080              throw e;
8081            } finally {
8082              if (parsedMessage != null) {
8083                mergeFrom(parsedMessage);
8084              }
8085            }
8086            return this;
8087          }
8088    
8089          // @@protoc_insertion_point(builder_scope:hadoop.hdfs.PurgeLogsResponseProto)
8090        }
8091    
8092        static {
8093          defaultInstance = new PurgeLogsResponseProto(true);
8094          defaultInstance.initFields();
8095        }
8096    
8097        // @@protoc_insertion_point(class_scope:hadoop.hdfs.PurgeLogsResponseProto)
8098      }
8099    
8100      public interface IsFormattedRequestProtoOrBuilder
8101          extends com.google.protobuf.MessageOrBuilder {
8102    
8103        // required .hadoop.hdfs.JournalIdProto jid = 1;
8104        /**
8105         * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
8106         */
8107        boolean hasJid();
8108        /**
8109         * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
8110         */
8111        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid();
8112        /**
8113         * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
8114         */
8115        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder();
8116      }
8117      /**
8118       * Protobuf type {@code hadoop.hdfs.IsFormattedRequestProto}
8119       *
8120       * <pre>
8121       **
8122       * isFormatted()
8123       * </pre>
8124       */
8125      public static final class IsFormattedRequestProto extends
8126          com.google.protobuf.GeneratedMessage
8127          implements IsFormattedRequestProtoOrBuilder {
8128        // Use IsFormattedRequestProto.newBuilder() to construct.
8129        private IsFormattedRequestProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
8130          super(builder);
8131          this.unknownFields = builder.getUnknownFields();
8132        }
8133        private IsFormattedRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
8134    
8135        private static final IsFormattedRequestProto defaultInstance;
8136        public static IsFormattedRequestProto getDefaultInstance() {
8137          return defaultInstance;
8138        }
8139    
8140        public IsFormattedRequestProto getDefaultInstanceForType() {
8141          return defaultInstance;
8142        }
8143    
8144        private final com.google.protobuf.UnknownFieldSet unknownFields;
8145        @java.lang.Override
8146        public final com.google.protobuf.UnknownFieldSet
8147            getUnknownFields() {
8148          return this.unknownFields;
8149        }
8150        private IsFormattedRequestProto(
8151            com.google.protobuf.CodedInputStream input,
8152            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
8153            throws com.google.protobuf.InvalidProtocolBufferException {
8154          initFields();
8155          int mutable_bitField0_ = 0;
8156          com.google.protobuf.UnknownFieldSet.Builder unknownFields =
8157              com.google.protobuf.UnknownFieldSet.newBuilder();
8158          try {
8159            boolean done = false;
8160            while (!done) {
8161              int tag = input.readTag();
8162              switch (tag) {
8163                case 0:
8164                  done = true;
8165                  break;
8166                default: {
8167                  if (!parseUnknownField(input, unknownFields,
8168                                         extensionRegistry, tag)) {
8169                    done = true;
8170                  }
8171                  break;
8172                }
8173                case 10: {
8174                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder subBuilder = null;
8175                  if (((bitField0_ & 0x00000001) == 0x00000001)) {
8176                    subBuilder = jid_.toBuilder();
8177                  }
8178                  jid_ = input.readMessage(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.PARSER, extensionRegistry);
8179                  if (subBuilder != null) {
8180                    subBuilder.mergeFrom(jid_);
8181                    jid_ = subBuilder.buildPartial();
8182                  }
8183                  bitField0_ |= 0x00000001;
8184                  break;
8185                }
8186              }
8187            }
8188          } catch (com.google.protobuf.InvalidProtocolBufferException e) {
8189            throw e.setUnfinishedMessage(this);
8190          } catch (java.io.IOException e) {
8191            throw new com.google.protobuf.InvalidProtocolBufferException(
8192                e.getMessage()).setUnfinishedMessage(this);
8193          } finally {
8194            this.unknownFields = unknownFields.build();
8195            makeExtensionsImmutable();
8196          }
8197        }
8198        public static final com.google.protobuf.Descriptors.Descriptor
8199            getDescriptor() {
8200          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_IsFormattedRequestProto_descriptor;
8201        }
8202    
8203        protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
8204            internalGetFieldAccessorTable() {
8205          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_IsFormattedRequestProto_fieldAccessorTable
8206              .ensureFieldAccessorsInitialized(
8207                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto.Builder.class);
8208        }
8209    
8210        public static com.google.protobuf.Parser<IsFormattedRequestProto> PARSER =
8211            new com.google.protobuf.AbstractParser<IsFormattedRequestProto>() {
8212          public IsFormattedRequestProto parsePartialFrom(
8213              com.google.protobuf.CodedInputStream input,
8214              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
8215              throws com.google.protobuf.InvalidProtocolBufferException {
8216            return new IsFormattedRequestProto(input, extensionRegistry);
8217          }
8218        };
8219    
8220        @java.lang.Override
8221        public com.google.protobuf.Parser<IsFormattedRequestProto> getParserForType() {
8222          return PARSER;
8223        }
8224    
8225        private int bitField0_;
8226        // required .hadoop.hdfs.JournalIdProto jid = 1;
8227        public static final int JID_FIELD_NUMBER = 1;
8228        private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto jid_;
8229        /**
8230         * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
8231         */
8232        public boolean hasJid() {
8233          return ((bitField0_ & 0x00000001) == 0x00000001);
8234        }
8235        /**
8236         * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
8237         */
8238        public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid() {
8239          return jid_;
8240        }
8241        /**
8242         * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
8243         */
8244        public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder() {
8245          return jid_;
8246        }
8247    
8248        private void initFields() {
8249          jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
8250        }
8251        private byte memoizedIsInitialized = -1;
8252        public final boolean isInitialized() {
8253          byte isInitialized = memoizedIsInitialized;
8254          if (isInitialized != -1) return isInitialized == 1;
8255    
8256          if (!hasJid()) {
8257            memoizedIsInitialized = 0;
8258            return false;
8259          }
8260          if (!getJid().isInitialized()) {
8261            memoizedIsInitialized = 0;
8262            return false;
8263          }
8264          memoizedIsInitialized = 1;
8265          return true;
8266        }
8267    
8268        public void writeTo(com.google.protobuf.CodedOutputStream output)
8269                            throws java.io.IOException {
8270          getSerializedSize();
8271          if (((bitField0_ & 0x00000001) == 0x00000001)) {
8272            output.writeMessage(1, jid_);
8273          }
8274          getUnknownFields().writeTo(output);
8275        }
8276    
8277        private int memoizedSerializedSize = -1;
8278        public int getSerializedSize() {
8279          int size = memoizedSerializedSize;
8280          if (size != -1) return size;
8281    
8282          size = 0;
8283          if (((bitField0_ & 0x00000001) == 0x00000001)) {
8284            size += com.google.protobuf.CodedOutputStream
8285              .computeMessageSize(1, jid_);
8286          }
8287          size += getUnknownFields().getSerializedSize();
8288          memoizedSerializedSize = size;
8289          return size;
8290        }
8291    
8292        private static final long serialVersionUID = 0L;
8293        @java.lang.Override
8294        protected java.lang.Object writeReplace()
8295            throws java.io.ObjectStreamException {
8296          return super.writeReplace();
8297        }
8298    
8299        @java.lang.Override
8300        public boolean equals(final java.lang.Object obj) {
8301          if (obj == this) {
8302           return true;
8303          }
8304          if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto)) {
8305            return super.equals(obj);
8306          }
8307          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto) obj;
8308    
8309          boolean result = true;
8310          result = result && (hasJid() == other.hasJid());
8311          if (hasJid()) {
8312            result = result && getJid()
8313                .equals(other.getJid());
8314          }
8315          result = result &&
8316              getUnknownFields().equals(other.getUnknownFields());
8317          return result;
8318        }
8319    
8320        private int memoizedHashCode = 0;
8321        @java.lang.Override
8322        public int hashCode() {
8323          if (memoizedHashCode != 0) {
8324            return memoizedHashCode;
8325          }
8326          int hash = 41;
8327          hash = (19 * hash) + getDescriptorForType().hashCode();
8328          if (hasJid()) {
8329            hash = (37 * hash) + JID_FIELD_NUMBER;
8330            hash = (53 * hash) + getJid().hashCode();
8331          }
8332          hash = (29 * hash) + getUnknownFields().hashCode();
8333          memoizedHashCode = hash;
8334          return hash;
8335        }
8336    
8337        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto parseFrom(
8338            com.google.protobuf.ByteString data)
8339            throws com.google.protobuf.InvalidProtocolBufferException {
8340          return PARSER.parseFrom(data);
8341        }
8342        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto parseFrom(
8343            com.google.protobuf.ByteString data,
8344            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
8345            throws com.google.protobuf.InvalidProtocolBufferException {
8346          return PARSER.parseFrom(data, extensionRegistry);
8347        }
8348        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto parseFrom(byte[] data)
8349            throws com.google.protobuf.InvalidProtocolBufferException {
8350          return PARSER.parseFrom(data);
8351        }
8352        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto parseFrom(
8353            byte[] data,
8354            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
8355            throws com.google.protobuf.InvalidProtocolBufferException {
8356          return PARSER.parseFrom(data, extensionRegistry);
8357        }
8358        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto parseFrom(java.io.InputStream input)
8359            throws java.io.IOException {
8360          return PARSER.parseFrom(input);
8361        }
8362        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto parseFrom(
8363            java.io.InputStream input,
8364            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
8365            throws java.io.IOException {
8366          return PARSER.parseFrom(input, extensionRegistry);
8367        }
8368        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto parseDelimitedFrom(java.io.InputStream input)
8369            throws java.io.IOException {
8370          return PARSER.parseDelimitedFrom(input);
8371        }
8372        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto parseDelimitedFrom(
8373            java.io.InputStream input,
8374            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
8375            throws java.io.IOException {
8376          return PARSER.parseDelimitedFrom(input, extensionRegistry);
8377        }
8378        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto parseFrom(
8379            com.google.protobuf.CodedInputStream input)
8380            throws java.io.IOException {
8381          return PARSER.parseFrom(input);
8382        }
8383        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto parseFrom(
8384            com.google.protobuf.CodedInputStream input,
8385            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
8386            throws java.io.IOException {
8387          return PARSER.parseFrom(input, extensionRegistry);
8388        }
8389    
8390        public static Builder newBuilder() { return Builder.create(); }
8391        public Builder newBuilderForType() { return newBuilder(); }
8392        public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto prototype) {
8393          return newBuilder().mergeFrom(prototype);
8394        }
8395        public Builder toBuilder() { return newBuilder(this); }
8396    
8397        @java.lang.Override
8398        protected Builder newBuilderForType(
8399            com.google.protobuf.GeneratedMessage.BuilderParent parent) {
8400          Builder builder = new Builder(parent);
8401          return builder;
8402        }
8403        /**
8404         * Protobuf type {@code hadoop.hdfs.IsFormattedRequestProto}
8405         *
8406         * <pre>
8407         **
8408         * isFormatted()
8409         * </pre>
8410         */
8411        public static final class Builder extends
8412            com.google.protobuf.GeneratedMessage.Builder<Builder>
8413           implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProtoOrBuilder {
8414          public static final com.google.protobuf.Descriptors.Descriptor
8415              getDescriptor() {
8416            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_IsFormattedRequestProto_descriptor;
8417          }
8418    
8419          protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
8420              internalGetFieldAccessorTable() {
8421            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_IsFormattedRequestProto_fieldAccessorTable
8422                .ensureFieldAccessorsInitialized(
8423                    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto.Builder.class);
8424          }
8425    
8426          // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto.newBuilder()
8427          private Builder() {
8428            maybeForceBuilderInitialization();
8429          }
8430    
8431          private Builder(
8432              com.google.protobuf.GeneratedMessage.BuilderParent parent) {
8433            super(parent);
8434            maybeForceBuilderInitialization();
8435          }
8436          private void maybeForceBuilderInitialization() {
8437            if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
8438              getJidFieldBuilder();
8439            }
8440          }
8441          private static Builder create() {
8442            return new Builder();
8443          }
8444    
8445          public Builder clear() {
8446            super.clear();
8447            if (jidBuilder_ == null) {
8448              jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
8449            } else {
8450              jidBuilder_.clear();
8451            }
8452            bitField0_ = (bitField0_ & ~0x00000001);
8453            return this;
8454          }
8455    
8456          public Builder clone() {
8457            return create().mergeFrom(buildPartial());
8458          }
8459    
8460          public com.google.protobuf.Descriptors.Descriptor
8461              getDescriptorForType() {
8462            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_IsFormattedRequestProto_descriptor;
8463          }
8464    
8465          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto getDefaultInstanceForType() {
8466            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto.getDefaultInstance();
8467          }
8468    
8469          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto build() {
8470            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto result = buildPartial();
8471            if (!result.isInitialized()) {
8472              throw newUninitializedMessageException(result);
8473            }
8474            return result;
8475          }
8476    
8477          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto buildPartial() {
8478            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto(this);
8479            int from_bitField0_ = bitField0_;
8480            int to_bitField0_ = 0;
8481            if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
8482              to_bitField0_ |= 0x00000001;
8483            }
8484            if (jidBuilder_ == null) {
8485              result.jid_ = jid_;
8486            } else {
8487              result.jid_ = jidBuilder_.build();
8488            }
8489            result.bitField0_ = to_bitField0_;
8490            onBuilt();
8491            return result;
8492          }
8493    
8494          public Builder mergeFrom(com.google.protobuf.Message other) {
8495            if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto) {
8496              return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto)other);
8497            } else {
8498              super.mergeFrom(other);
8499              return this;
8500            }
8501          }
8502    
8503          public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto other) {
8504            if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto.getDefaultInstance()) return this;
8505            if (other.hasJid()) {
8506              mergeJid(other.getJid());
8507            }
8508            this.mergeUnknownFields(other.getUnknownFields());
8509            return this;
8510          }
8511    
8512          public final boolean isInitialized() {
8513            if (!hasJid()) {
8514              
8515              return false;
8516            }
8517            if (!getJid().isInitialized()) {
8518              
8519              return false;
8520            }
8521            return true;
8522          }
8523    
8524          public Builder mergeFrom(
8525              com.google.protobuf.CodedInputStream input,
8526              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
8527              throws java.io.IOException {
8528            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto parsedMessage = null;
8529            try {
8530              parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
8531            } catch (com.google.protobuf.InvalidProtocolBufferException e) {
8532              parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto) e.getUnfinishedMessage();
8533              throw e;
8534            } finally {
8535              if (parsedMessage != null) {
8536                mergeFrom(parsedMessage);
8537              }
8538            }
8539            return this;
8540          }
8541          private int bitField0_;
8542    
8543          // required .hadoop.hdfs.JournalIdProto jid = 1;
8544          private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
8545          private com.google.protobuf.SingleFieldBuilder<
8546              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder> jidBuilder_;
8547          /**
8548           * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
8549           */
8550          public boolean hasJid() {
8551            return ((bitField0_ & 0x00000001) == 0x00000001);
8552          }
8553          /**
8554           * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
8555           */
8556          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid() {
8557            if (jidBuilder_ == null) {
8558              return jid_;
8559            } else {
8560              return jidBuilder_.getMessage();
8561            }
8562          }
8563          /**
8564           * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
8565           */
8566          public Builder setJid(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto value) {
8567            if (jidBuilder_ == null) {
8568              if (value == null) {
8569                throw new NullPointerException();
8570              }
8571              jid_ = value;
8572              onChanged();
8573            } else {
8574              jidBuilder_.setMessage(value);
8575            }
8576            bitField0_ |= 0x00000001;
8577            return this;
8578          }
8579          /**
8580           * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
8581           */
8582          public Builder setJid(
8583              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder builderForValue) {
8584            if (jidBuilder_ == null) {
8585              jid_ = builderForValue.build();
8586              onChanged();
8587            } else {
8588              jidBuilder_.setMessage(builderForValue.build());
8589            }
8590            bitField0_ |= 0x00000001;
8591            return this;
8592          }
8593          /**
8594           * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
8595           */
8596          public Builder mergeJid(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto value) {
8597            if (jidBuilder_ == null) {
8598              if (((bitField0_ & 0x00000001) == 0x00000001) &&
8599                  jid_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance()) {
8600                jid_ =
8601                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.newBuilder(jid_).mergeFrom(value).buildPartial();
8602              } else {
8603                jid_ = value;
8604              }
8605              onChanged();
8606            } else {
8607              jidBuilder_.mergeFrom(value);
8608            }
8609            bitField0_ |= 0x00000001;
8610            return this;
8611          }
8612          /**
8613           * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
8614           */
8615          public Builder clearJid() {
8616            if (jidBuilder_ == null) {
8617              jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
8618              onChanged();
8619            } else {
8620              jidBuilder_.clear();
8621            }
8622            bitField0_ = (bitField0_ & ~0x00000001);
8623            return this;
8624          }
8625          /**
8626           * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
8627           */
8628          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder getJidBuilder() {
8629            bitField0_ |= 0x00000001;
8630            onChanged();
8631            return getJidFieldBuilder().getBuilder();
8632          }
8633          /**
8634           * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
8635           */
8636          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder() {
8637            if (jidBuilder_ != null) {
8638              return jidBuilder_.getMessageOrBuilder();
8639            } else {
8640              return jid_;
8641            }
8642          }
8643          /**
8644           * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
8645           */
8646          private com.google.protobuf.SingleFieldBuilder<
8647              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder> 
8648              getJidFieldBuilder() {
8649            if (jidBuilder_ == null) {
8650              jidBuilder_ = new com.google.protobuf.SingleFieldBuilder<
8651                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder>(
8652                      jid_,
8653                      getParentForChildren(),
8654                      isClean());
8655              jid_ = null;
8656            }
8657            return jidBuilder_;
8658          }
8659    
8660          // @@protoc_insertion_point(builder_scope:hadoop.hdfs.IsFormattedRequestProto)
8661        }
8662    
8663        static {
8664          defaultInstance = new IsFormattedRequestProto(true);
8665          defaultInstance.initFields();
8666        }
8667    
8668        // @@protoc_insertion_point(class_scope:hadoop.hdfs.IsFormattedRequestProto)
8669      }
8670    
8671      public interface IsFormattedResponseProtoOrBuilder
8672          extends com.google.protobuf.MessageOrBuilder {
8673    
8674        // required bool isFormatted = 1;
8675        /**
8676         * <code>required bool isFormatted = 1;</code>
8677         */
8678        boolean hasIsFormatted();
8679        /**
8680         * <code>required bool isFormatted = 1;</code>
8681         */
8682        boolean getIsFormatted();
8683      }
8684      /**
8685       * Protobuf type {@code hadoop.hdfs.IsFormattedResponseProto}
8686       */
8687      public static final class IsFormattedResponseProto extends
8688          com.google.protobuf.GeneratedMessage
8689          implements IsFormattedResponseProtoOrBuilder {
8690        // Use IsFormattedResponseProto.newBuilder() to construct.
8691        private IsFormattedResponseProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
8692          super(builder);
8693          this.unknownFields = builder.getUnknownFields();
8694        }
8695        private IsFormattedResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
8696    
8697        private static final IsFormattedResponseProto defaultInstance;
8698        public static IsFormattedResponseProto getDefaultInstance() {
8699          return defaultInstance;
8700        }
8701    
8702        public IsFormattedResponseProto getDefaultInstanceForType() {
8703          return defaultInstance;
8704        }
8705    
8706        private final com.google.protobuf.UnknownFieldSet unknownFields;
8707        @java.lang.Override
8708        public final com.google.protobuf.UnknownFieldSet
8709            getUnknownFields() {
8710          return this.unknownFields;
8711        }
8712        private IsFormattedResponseProto(
8713            com.google.protobuf.CodedInputStream input,
8714            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
8715            throws com.google.protobuf.InvalidProtocolBufferException {
8716          initFields();
8717          int mutable_bitField0_ = 0;
8718          com.google.protobuf.UnknownFieldSet.Builder unknownFields =
8719              com.google.protobuf.UnknownFieldSet.newBuilder();
8720          try {
8721            boolean done = false;
8722            while (!done) {
8723              int tag = input.readTag();
8724              switch (tag) {
8725                case 0:
8726                  done = true;
8727                  break;
8728                default: {
8729                  if (!parseUnknownField(input, unknownFields,
8730                                         extensionRegistry, tag)) {
8731                    done = true;
8732                  }
8733                  break;
8734                }
8735                case 8: {
8736                  bitField0_ |= 0x00000001;
8737                  isFormatted_ = input.readBool();
8738                  break;
8739                }
8740              }
8741            }
8742          } catch (com.google.protobuf.InvalidProtocolBufferException e) {
8743            throw e.setUnfinishedMessage(this);
8744          } catch (java.io.IOException e) {
8745            throw new com.google.protobuf.InvalidProtocolBufferException(
8746                e.getMessage()).setUnfinishedMessage(this);
8747          } finally {
8748            this.unknownFields = unknownFields.build();
8749            makeExtensionsImmutable();
8750          }
8751        }
8752        public static final com.google.protobuf.Descriptors.Descriptor
8753            getDescriptor() {
8754          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_IsFormattedResponseProto_descriptor;
8755        }
8756    
8757        protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
8758            internalGetFieldAccessorTable() {
8759          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_IsFormattedResponseProto_fieldAccessorTable
8760              .ensureFieldAccessorsInitialized(
8761                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto.Builder.class);
8762        }
8763    
8764        public static com.google.protobuf.Parser<IsFormattedResponseProto> PARSER =
8765            new com.google.protobuf.AbstractParser<IsFormattedResponseProto>() {
8766          public IsFormattedResponseProto parsePartialFrom(
8767              com.google.protobuf.CodedInputStream input,
8768              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
8769              throws com.google.protobuf.InvalidProtocolBufferException {
8770            return new IsFormattedResponseProto(input, extensionRegistry);
8771          }
8772        };
8773    
8774        @java.lang.Override
8775        public com.google.protobuf.Parser<IsFormattedResponseProto> getParserForType() {
8776          return PARSER;
8777        }
8778    
8779        private int bitField0_;
8780        // required bool isFormatted = 1;
8781        public static final int ISFORMATTED_FIELD_NUMBER = 1;
8782        private boolean isFormatted_;
8783        /**
8784         * <code>required bool isFormatted = 1;</code>
8785         */
8786        public boolean hasIsFormatted() {
8787          return ((bitField0_ & 0x00000001) == 0x00000001);
8788        }
8789        /**
8790         * <code>required bool isFormatted = 1;</code>
8791         */
8792        public boolean getIsFormatted() {
8793          return isFormatted_;
8794        }
8795    
8796        private void initFields() {
8797          isFormatted_ = false;
8798        }
8799        private byte memoizedIsInitialized = -1;
8800        public final boolean isInitialized() {
8801          byte isInitialized = memoizedIsInitialized;
8802          if (isInitialized != -1) return isInitialized == 1;
8803    
8804          if (!hasIsFormatted()) {
8805            memoizedIsInitialized = 0;
8806            return false;
8807          }
8808          memoizedIsInitialized = 1;
8809          return true;
8810        }
8811    
8812        public void writeTo(com.google.protobuf.CodedOutputStream output)
8813                            throws java.io.IOException {
8814          getSerializedSize();
8815          if (((bitField0_ & 0x00000001) == 0x00000001)) {
8816            output.writeBool(1, isFormatted_);
8817          }
8818          getUnknownFields().writeTo(output);
8819        }
8820    
8821        private int memoizedSerializedSize = -1;
8822        public int getSerializedSize() {
8823          int size = memoizedSerializedSize;
8824          if (size != -1) return size;
8825    
8826          size = 0;
8827          if (((bitField0_ & 0x00000001) == 0x00000001)) {
8828            size += com.google.protobuf.CodedOutputStream
8829              .computeBoolSize(1, isFormatted_);
8830          }
8831          size += getUnknownFields().getSerializedSize();
8832          memoizedSerializedSize = size;
8833          return size;
8834        }
8835    
8836        private static final long serialVersionUID = 0L;
8837        @java.lang.Override
8838        protected java.lang.Object writeReplace()
8839            throws java.io.ObjectStreamException {
8840          return super.writeReplace();
8841        }
8842    
8843        @java.lang.Override
8844        public boolean equals(final java.lang.Object obj) {
8845          if (obj == this) {
8846           return true;
8847          }
8848          if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto)) {
8849            return super.equals(obj);
8850          }
8851          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto) obj;
8852    
8853          boolean result = true;
8854          result = result && (hasIsFormatted() == other.hasIsFormatted());
8855          if (hasIsFormatted()) {
8856            result = result && (getIsFormatted()
8857                == other.getIsFormatted());
8858          }
8859          result = result &&
8860              getUnknownFields().equals(other.getUnknownFields());
8861          return result;
8862        }
8863    
8864        private int memoizedHashCode = 0;
8865        @java.lang.Override
8866        public int hashCode() {
8867          if (memoizedHashCode != 0) {
8868            return memoizedHashCode;
8869          }
8870          int hash = 41;
8871          hash = (19 * hash) + getDescriptorForType().hashCode();
8872          if (hasIsFormatted()) {
8873            hash = (37 * hash) + ISFORMATTED_FIELD_NUMBER;
8874            hash = (53 * hash) + hashBoolean(getIsFormatted());
8875          }
8876          hash = (29 * hash) + getUnknownFields().hashCode();
8877          memoizedHashCode = hash;
8878          return hash;
8879        }
8880    
8881        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto parseFrom(
8882            com.google.protobuf.ByteString data)
8883            throws com.google.protobuf.InvalidProtocolBufferException {
8884          return PARSER.parseFrom(data);
8885        }
8886        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto parseFrom(
8887            com.google.protobuf.ByteString data,
8888            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
8889            throws com.google.protobuf.InvalidProtocolBufferException {
8890          return PARSER.parseFrom(data, extensionRegistry);
8891        }
8892        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto parseFrom(byte[] data)
8893            throws com.google.protobuf.InvalidProtocolBufferException {
8894          return PARSER.parseFrom(data);
8895        }
8896        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto parseFrom(
8897            byte[] data,
8898            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
8899            throws com.google.protobuf.InvalidProtocolBufferException {
8900          return PARSER.parseFrom(data, extensionRegistry);
8901        }
8902        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto parseFrom(java.io.InputStream input)
8903            throws java.io.IOException {
8904          return PARSER.parseFrom(input);
8905        }
8906        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto parseFrom(
8907            java.io.InputStream input,
8908            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
8909            throws java.io.IOException {
8910          return PARSER.parseFrom(input, extensionRegistry);
8911        }
8912        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto parseDelimitedFrom(java.io.InputStream input)
8913            throws java.io.IOException {
8914          return PARSER.parseDelimitedFrom(input);
8915        }
8916        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto parseDelimitedFrom(
8917            java.io.InputStream input,
8918            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
8919            throws java.io.IOException {
8920          return PARSER.parseDelimitedFrom(input, extensionRegistry);
8921        }
8922        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto parseFrom(
8923            com.google.protobuf.CodedInputStream input)
8924            throws java.io.IOException {
8925          return PARSER.parseFrom(input);
8926        }
8927        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto parseFrom(
8928            com.google.protobuf.CodedInputStream input,
8929            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
8930            throws java.io.IOException {
8931          return PARSER.parseFrom(input, extensionRegistry);
8932        }
8933    
8934        public static Builder newBuilder() { return Builder.create(); }
8935        public Builder newBuilderForType() { return newBuilder(); }
8936        public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto prototype) {
8937          return newBuilder().mergeFrom(prototype);
8938        }
8939        public Builder toBuilder() { return newBuilder(this); }
8940    
8941        @java.lang.Override
8942        protected Builder newBuilderForType(
8943            com.google.protobuf.GeneratedMessage.BuilderParent parent) {
8944          Builder builder = new Builder(parent);
8945          return builder;
8946        }
8947        /**
8948         * Protobuf type {@code hadoop.hdfs.IsFormattedResponseProto}
8949         */
8950        public static final class Builder extends
8951            com.google.protobuf.GeneratedMessage.Builder<Builder>
8952           implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProtoOrBuilder {
8953          public static final com.google.protobuf.Descriptors.Descriptor
8954              getDescriptor() {
8955            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_IsFormattedResponseProto_descriptor;
8956          }
8957    
8958          protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
8959              internalGetFieldAccessorTable() {
8960            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_IsFormattedResponseProto_fieldAccessorTable
8961                .ensureFieldAccessorsInitialized(
8962                    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto.Builder.class);
8963          }
8964    
8965          // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto.newBuilder()
8966          private Builder() {
8967            maybeForceBuilderInitialization();
8968          }
8969    
8970          private Builder(
8971              com.google.protobuf.GeneratedMessage.BuilderParent parent) {
8972            super(parent);
8973            maybeForceBuilderInitialization();
8974          }
8975          private void maybeForceBuilderInitialization() {
8976            if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
8977            }
8978          }
8979          private static Builder create() {
8980            return new Builder();
8981          }
8982    
8983          public Builder clear() {
8984            super.clear();
8985            isFormatted_ = false;
8986            bitField0_ = (bitField0_ & ~0x00000001);
8987            return this;
8988          }
8989    
8990          public Builder clone() {
8991            return create().mergeFrom(buildPartial());
8992          }
8993    
8994          public com.google.protobuf.Descriptors.Descriptor
8995              getDescriptorForType() {
8996            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_IsFormattedResponseProto_descriptor;
8997          }
8998    
8999          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto getDefaultInstanceForType() {
9000            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto.getDefaultInstance();
9001          }
9002    
9003          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto build() {
9004            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto result = buildPartial();
9005            if (!result.isInitialized()) {
9006              throw newUninitializedMessageException(result);
9007            }
9008            return result;
9009          }
9010    
9011          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto buildPartial() {
9012            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto(this);
9013            int from_bitField0_ = bitField0_;
9014            int to_bitField0_ = 0;
9015            if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
9016              to_bitField0_ |= 0x00000001;
9017            }
9018            result.isFormatted_ = isFormatted_;
9019            result.bitField0_ = to_bitField0_;
9020            onBuilt();
9021            return result;
9022          }
9023    
9024          public Builder mergeFrom(com.google.protobuf.Message other) {
9025            if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto) {
9026              return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto)other);
9027            } else {
9028              super.mergeFrom(other);
9029              return this;
9030            }
9031          }
9032    
9033          public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto other) {
9034            if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto.getDefaultInstance()) return this;
9035            if (other.hasIsFormatted()) {
9036              setIsFormatted(other.getIsFormatted());
9037            }
9038            this.mergeUnknownFields(other.getUnknownFields());
9039            return this;
9040          }
9041    
9042          public final boolean isInitialized() {
9043            if (!hasIsFormatted()) {
9044              
9045              return false;
9046            }
9047            return true;
9048          }
9049    
9050          public Builder mergeFrom(
9051              com.google.protobuf.CodedInputStream input,
9052              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
9053              throws java.io.IOException {
9054            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto parsedMessage = null;
9055            try {
9056              parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
9057            } catch (com.google.protobuf.InvalidProtocolBufferException e) {
9058              parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto) e.getUnfinishedMessage();
9059              throw e;
9060            } finally {
9061              if (parsedMessage != null) {
9062                mergeFrom(parsedMessage);
9063              }
9064            }
9065            return this;
9066          }
9067          private int bitField0_;
9068    
9069          // required bool isFormatted = 1;
9070          private boolean isFormatted_ ;
9071          /**
9072           * <code>required bool isFormatted = 1;</code>
9073           */
9074          public boolean hasIsFormatted() {
9075            return ((bitField0_ & 0x00000001) == 0x00000001);
9076          }
9077          /**
9078           * <code>required bool isFormatted = 1;</code>
9079           */
9080          public boolean getIsFormatted() {
9081            return isFormatted_;
9082          }
9083          /**
9084           * <code>required bool isFormatted = 1;</code>
9085           */
9086          public Builder setIsFormatted(boolean value) {
9087            bitField0_ |= 0x00000001;
9088            isFormatted_ = value;
9089            onChanged();
9090            return this;
9091          }
9092          /**
9093           * <code>required bool isFormatted = 1;</code>
9094           */
9095          public Builder clearIsFormatted() {
9096            bitField0_ = (bitField0_ & ~0x00000001);
9097            isFormatted_ = false;
9098            onChanged();
9099            return this;
9100          }
9101    
9102          // @@protoc_insertion_point(builder_scope:hadoop.hdfs.IsFormattedResponseProto)
9103        }
9104    
9105        static {
9106          defaultInstance = new IsFormattedResponseProto(true);
9107          defaultInstance.initFields();
9108        }
9109    
9110        // @@protoc_insertion_point(class_scope:hadoop.hdfs.IsFormattedResponseProto)
9111      }
9112    
9113      public interface GetJournalStateRequestProtoOrBuilder
9114          extends com.google.protobuf.MessageOrBuilder {
9115    
9116        // required .hadoop.hdfs.JournalIdProto jid = 1;
9117        /**
9118         * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
9119         */
9120        boolean hasJid();
9121        /**
9122         * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
9123         */
9124        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid();
9125        /**
9126         * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
9127         */
9128        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder();
9129      }
9130      /**
9131       * Protobuf type {@code hadoop.hdfs.GetJournalStateRequestProto}
9132       *
9133       * <pre>
9134       **
9135       * getJournalState()
9136       * </pre>
9137       */
9138      public static final class GetJournalStateRequestProto extends
9139          com.google.protobuf.GeneratedMessage
9140          implements GetJournalStateRequestProtoOrBuilder {
9141        // Use GetJournalStateRequestProto.newBuilder() to construct.
9142        private GetJournalStateRequestProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
9143          super(builder);
9144          this.unknownFields = builder.getUnknownFields();
9145        }
9146        private GetJournalStateRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
9147    
9148        private static final GetJournalStateRequestProto defaultInstance;
9149        public static GetJournalStateRequestProto getDefaultInstance() {
9150          return defaultInstance;
9151        }
9152    
9153        public GetJournalStateRequestProto getDefaultInstanceForType() {
9154          return defaultInstance;
9155        }
9156    
9157        private final com.google.protobuf.UnknownFieldSet unknownFields;
9158        @java.lang.Override
9159        public final com.google.protobuf.UnknownFieldSet
9160            getUnknownFields() {
9161          return this.unknownFields;
9162        }
9163        private GetJournalStateRequestProto(
9164            com.google.protobuf.CodedInputStream input,
9165            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
9166            throws com.google.protobuf.InvalidProtocolBufferException {
9167          initFields();
9168          int mutable_bitField0_ = 0;
9169          com.google.protobuf.UnknownFieldSet.Builder unknownFields =
9170              com.google.protobuf.UnknownFieldSet.newBuilder();
9171          try {
9172            boolean done = false;
9173            while (!done) {
9174              int tag = input.readTag();
9175              switch (tag) {
9176                case 0:
9177                  done = true;
9178                  break;
9179                default: {
9180                  if (!parseUnknownField(input, unknownFields,
9181                                         extensionRegistry, tag)) {
9182                    done = true;
9183                  }
9184                  break;
9185                }
9186                case 10: {
9187                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder subBuilder = null;
9188                  if (((bitField0_ & 0x00000001) == 0x00000001)) {
9189                    subBuilder = jid_.toBuilder();
9190                  }
9191                  jid_ = input.readMessage(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.PARSER, extensionRegistry);
9192                  if (subBuilder != null) {
9193                    subBuilder.mergeFrom(jid_);
9194                    jid_ = subBuilder.buildPartial();
9195                  }
9196                  bitField0_ |= 0x00000001;
9197                  break;
9198                }
9199              }
9200            }
9201          } catch (com.google.protobuf.InvalidProtocolBufferException e) {
9202            throw e.setUnfinishedMessage(this);
9203          } catch (java.io.IOException e) {
9204            throw new com.google.protobuf.InvalidProtocolBufferException(
9205                e.getMessage()).setUnfinishedMessage(this);
9206          } finally {
9207            this.unknownFields = unknownFields.build();
9208            makeExtensionsImmutable();
9209          }
9210        }
9211        public static final com.google.protobuf.Descriptors.Descriptor
9212            getDescriptor() {
9213          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_GetJournalStateRequestProto_descriptor;
9214        }
9215    
9216        protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
9217            internalGetFieldAccessorTable() {
9218          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_GetJournalStateRequestProto_fieldAccessorTable
9219              .ensureFieldAccessorsInitialized(
9220                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto.Builder.class);
9221        }
9222    
9223        public static com.google.protobuf.Parser<GetJournalStateRequestProto> PARSER =
9224            new com.google.protobuf.AbstractParser<GetJournalStateRequestProto>() {
9225          public GetJournalStateRequestProto parsePartialFrom(
9226              com.google.protobuf.CodedInputStream input,
9227              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
9228              throws com.google.protobuf.InvalidProtocolBufferException {
9229            return new GetJournalStateRequestProto(input, extensionRegistry);
9230          }
9231        };
9232    
9233        @java.lang.Override
9234        public com.google.protobuf.Parser<GetJournalStateRequestProto> getParserForType() {
9235          return PARSER;
9236        }
9237    
9238        private int bitField0_;
9239        // required .hadoop.hdfs.JournalIdProto jid = 1;
9240        public static final int JID_FIELD_NUMBER = 1;
9241        private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto jid_;
9242        /**
9243         * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
9244         */
9245        public boolean hasJid() {
9246          return ((bitField0_ & 0x00000001) == 0x00000001);
9247        }
9248        /**
9249         * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
9250         */
9251        public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid() {
9252          return jid_;
9253        }
9254        /**
9255         * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
9256         */
9257        public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder() {
9258          return jid_;
9259        }
9260    
9261        private void initFields() {
9262          jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
9263        }
9264        private byte memoizedIsInitialized = -1;
9265        public final boolean isInitialized() {
9266          byte isInitialized = memoizedIsInitialized;
9267          if (isInitialized != -1) return isInitialized == 1;
9268    
9269          if (!hasJid()) {
9270            memoizedIsInitialized = 0;
9271            return false;
9272          }
9273          if (!getJid().isInitialized()) {
9274            memoizedIsInitialized = 0;
9275            return false;
9276          }
9277          memoizedIsInitialized = 1;
9278          return true;
9279        }
9280    
9281        public void writeTo(com.google.protobuf.CodedOutputStream output)
9282                            throws java.io.IOException {
9283          getSerializedSize();
9284          if (((bitField0_ & 0x00000001) == 0x00000001)) {
9285            output.writeMessage(1, jid_);
9286          }
9287          getUnknownFields().writeTo(output);
9288        }
9289    
9290        private int memoizedSerializedSize = -1;
9291        public int getSerializedSize() {
9292          int size = memoizedSerializedSize;
9293          if (size != -1) return size;
9294    
9295          size = 0;
9296          if (((bitField0_ & 0x00000001) == 0x00000001)) {
9297            size += com.google.protobuf.CodedOutputStream
9298              .computeMessageSize(1, jid_);
9299          }
9300          size += getUnknownFields().getSerializedSize();
9301          memoizedSerializedSize = size;
9302          return size;
9303        }
9304    
9305        private static final long serialVersionUID = 0L;
9306        @java.lang.Override
9307        protected java.lang.Object writeReplace()
9308            throws java.io.ObjectStreamException {
9309          return super.writeReplace();
9310        }
9311    
9312        @java.lang.Override
9313        public boolean equals(final java.lang.Object obj) {
9314          if (obj == this) {
9315           return true;
9316          }
9317          if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto)) {
9318            return super.equals(obj);
9319          }
9320          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto) obj;
9321    
9322          boolean result = true;
9323          result = result && (hasJid() == other.hasJid());
9324          if (hasJid()) {
9325            result = result && getJid()
9326                .equals(other.getJid());
9327          }
9328          result = result &&
9329              getUnknownFields().equals(other.getUnknownFields());
9330          return result;
9331        }
9332    
9333        private int memoizedHashCode = 0;
9334        @java.lang.Override
9335        public int hashCode() {
9336          if (memoizedHashCode != 0) {
9337            return memoizedHashCode;
9338          }
9339          int hash = 41;
9340          hash = (19 * hash) + getDescriptorForType().hashCode();
9341          if (hasJid()) {
9342            hash = (37 * hash) + JID_FIELD_NUMBER;
9343            hash = (53 * hash) + getJid().hashCode();
9344          }
9345          hash = (29 * hash) + getUnknownFields().hashCode();
9346          memoizedHashCode = hash;
9347          return hash;
9348        }
9349    
9350        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto parseFrom(
9351            com.google.protobuf.ByteString data)
9352            throws com.google.protobuf.InvalidProtocolBufferException {
9353          return PARSER.parseFrom(data);
9354        }
9355        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto parseFrom(
9356            com.google.protobuf.ByteString data,
9357            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
9358            throws com.google.protobuf.InvalidProtocolBufferException {
9359          return PARSER.parseFrom(data, extensionRegistry);
9360        }
9361        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto parseFrom(byte[] data)
9362            throws com.google.protobuf.InvalidProtocolBufferException {
9363          return PARSER.parseFrom(data);
9364        }
9365        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto parseFrom(
9366            byte[] data,
9367            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
9368            throws com.google.protobuf.InvalidProtocolBufferException {
9369          return PARSER.parseFrom(data, extensionRegistry);
9370        }
9371        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto parseFrom(java.io.InputStream input)
9372            throws java.io.IOException {
9373          return PARSER.parseFrom(input);
9374        }
9375        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto parseFrom(
9376            java.io.InputStream input,
9377            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
9378            throws java.io.IOException {
9379          return PARSER.parseFrom(input, extensionRegistry);
9380        }
9381        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto parseDelimitedFrom(java.io.InputStream input)
9382            throws java.io.IOException {
9383          return PARSER.parseDelimitedFrom(input);
9384        }
9385        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto parseDelimitedFrom(
9386            java.io.InputStream input,
9387            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
9388            throws java.io.IOException {
9389          return PARSER.parseDelimitedFrom(input, extensionRegistry);
9390        }
9391        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto parseFrom(
9392            com.google.protobuf.CodedInputStream input)
9393            throws java.io.IOException {
9394          return PARSER.parseFrom(input);
9395        }
9396        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto parseFrom(
9397            com.google.protobuf.CodedInputStream input,
9398            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
9399            throws java.io.IOException {
9400          return PARSER.parseFrom(input, extensionRegistry);
9401        }
9402    
9403        public static Builder newBuilder() { return Builder.create(); }
9404        public Builder newBuilderForType() { return newBuilder(); }
9405        public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto prototype) {
9406          return newBuilder().mergeFrom(prototype);
9407        }
9408        public Builder toBuilder() { return newBuilder(this); }
9409    
9410        @java.lang.Override
9411        protected Builder newBuilderForType(
9412            com.google.protobuf.GeneratedMessage.BuilderParent parent) {
9413          Builder builder = new Builder(parent);
9414          return builder;
9415        }
9416        /**
9417         * Protobuf type {@code hadoop.hdfs.GetJournalStateRequestProto}
9418         *
9419         * <pre>
9420         **
9421         * getJournalState()
9422         * </pre>
9423         */
9424        public static final class Builder extends
9425            com.google.protobuf.GeneratedMessage.Builder<Builder>
9426           implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProtoOrBuilder {
9427          public static final com.google.protobuf.Descriptors.Descriptor
9428              getDescriptor() {
9429            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_GetJournalStateRequestProto_descriptor;
9430          }
9431    
9432          protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
9433              internalGetFieldAccessorTable() {
9434            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_GetJournalStateRequestProto_fieldAccessorTable
9435                .ensureFieldAccessorsInitialized(
9436                    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto.Builder.class);
9437          }
9438    
9439          // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto.newBuilder()
9440          private Builder() {
9441            maybeForceBuilderInitialization();
9442          }
9443    
9444          private Builder(
9445              com.google.protobuf.GeneratedMessage.BuilderParent parent) {
9446            super(parent);
9447            maybeForceBuilderInitialization();
9448          }
9449          private void maybeForceBuilderInitialization() {
9450            if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
9451              getJidFieldBuilder();
9452            }
9453          }
9454          private static Builder create() {
9455            return new Builder();
9456          }
9457    
9458          public Builder clear() {
9459            super.clear();
9460            if (jidBuilder_ == null) {
9461              jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
9462            } else {
9463              jidBuilder_.clear();
9464            }
9465            bitField0_ = (bitField0_ & ~0x00000001);
9466            return this;
9467          }
9468    
9469          public Builder clone() {
9470            return create().mergeFrom(buildPartial());
9471          }
9472    
9473          public com.google.protobuf.Descriptors.Descriptor
9474              getDescriptorForType() {
9475            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_GetJournalStateRequestProto_descriptor;
9476          }
9477    
9478          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto getDefaultInstanceForType() {
9479            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto.getDefaultInstance();
9480          }
9481    
9482          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto build() {
9483            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto result = buildPartial();
9484            if (!result.isInitialized()) {
9485              throw newUninitializedMessageException(result);
9486            }
9487            return result;
9488          }
9489    
9490          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto buildPartial() {
9491            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto(this);
9492            int from_bitField0_ = bitField0_;
9493            int to_bitField0_ = 0;
9494            if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
9495              to_bitField0_ |= 0x00000001;
9496            }
9497            if (jidBuilder_ == null) {
9498              result.jid_ = jid_;
9499            } else {
9500              result.jid_ = jidBuilder_.build();
9501            }
9502            result.bitField0_ = to_bitField0_;
9503            onBuilt();
9504            return result;
9505          }
9506    
9507          public Builder mergeFrom(com.google.protobuf.Message other) {
9508            if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto) {
9509              return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto)other);
9510            } else {
9511              super.mergeFrom(other);
9512              return this;
9513            }
9514          }
9515    
9516          public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto other) {
9517            if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto.getDefaultInstance()) return this;
9518            if (other.hasJid()) {
9519              mergeJid(other.getJid());
9520            }
9521            this.mergeUnknownFields(other.getUnknownFields());
9522            return this;
9523          }
9524    
9525          public final boolean isInitialized() {
9526            if (!hasJid()) {
9527              
9528              return false;
9529            }
9530            if (!getJid().isInitialized()) {
9531              
9532              return false;
9533            }
9534            return true;
9535          }
9536    
9537          public Builder mergeFrom(
9538              com.google.protobuf.CodedInputStream input,
9539              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
9540              throws java.io.IOException {
9541            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto parsedMessage = null;
9542            try {
9543              parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
9544            } catch (com.google.protobuf.InvalidProtocolBufferException e) {
9545              parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto) e.getUnfinishedMessage();
9546              throw e;
9547            } finally {
9548              if (parsedMessage != null) {
9549                mergeFrom(parsedMessage);
9550              }
9551            }
9552            return this;
9553          }
9554          private int bitField0_;
9555    
9556          // required .hadoop.hdfs.JournalIdProto jid = 1;
9557          private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
9558          private com.google.protobuf.SingleFieldBuilder<
9559              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder> jidBuilder_;
9560          /**
9561           * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
9562           */
9563          public boolean hasJid() {
9564            return ((bitField0_ & 0x00000001) == 0x00000001);
9565          }
9566          /**
9567           * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
9568           */
9569          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid() {
9570            if (jidBuilder_ == null) {
9571              return jid_;
9572            } else {
9573              return jidBuilder_.getMessage();
9574            }
9575          }
9576          /**
9577           * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
9578           */
9579          public Builder setJid(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto value) {
9580            if (jidBuilder_ == null) {
9581              if (value == null) {
9582                throw new NullPointerException();
9583              }
9584              jid_ = value;
9585              onChanged();
9586            } else {
9587              jidBuilder_.setMessage(value);
9588            }
9589            bitField0_ |= 0x00000001;
9590            return this;
9591          }
9592          /**
9593           * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
9594           */
9595          public Builder setJid(
9596              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder builderForValue) {
9597            if (jidBuilder_ == null) {
9598              jid_ = builderForValue.build();
9599              onChanged();
9600            } else {
9601              jidBuilder_.setMessage(builderForValue.build());
9602            }
9603            bitField0_ |= 0x00000001;
9604            return this;
9605          }
9606          /**
9607           * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
9608           */
9609          public Builder mergeJid(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto value) {
9610            if (jidBuilder_ == null) {
9611              if (((bitField0_ & 0x00000001) == 0x00000001) &&
9612                  jid_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance()) {
9613                jid_ =
9614                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.newBuilder(jid_).mergeFrom(value).buildPartial();
9615              } else {
9616                jid_ = value;
9617              }
9618              onChanged();
9619            } else {
9620              jidBuilder_.mergeFrom(value);
9621            }
9622            bitField0_ |= 0x00000001;
9623            return this;
9624          }
9625          /**
9626           * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
9627           */
9628          public Builder clearJid() {
9629            if (jidBuilder_ == null) {
9630              jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
9631              onChanged();
9632            } else {
9633              jidBuilder_.clear();
9634            }
9635            bitField0_ = (bitField0_ & ~0x00000001);
9636            return this;
9637          }
9638          /**
9639           * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
9640           */
9641          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder getJidBuilder() {
9642            bitField0_ |= 0x00000001;
9643            onChanged();
9644            return getJidFieldBuilder().getBuilder();
9645          }
9646          /**
9647           * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
9648           */
9649          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder() {
9650            if (jidBuilder_ != null) {
9651              return jidBuilder_.getMessageOrBuilder();
9652            } else {
9653              return jid_;
9654            }
9655          }
9656          /**
9657           * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
9658           */
9659          private com.google.protobuf.SingleFieldBuilder<
9660              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder> 
9661              getJidFieldBuilder() {
9662            if (jidBuilder_ == null) {
9663              jidBuilder_ = new com.google.protobuf.SingleFieldBuilder<
9664                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder>(
9665                      jid_,
9666                      getParentForChildren(),
9667                      isClean());
9668              jid_ = null;
9669            }
9670            return jidBuilder_;
9671          }
9672    
9673          // @@protoc_insertion_point(builder_scope:hadoop.hdfs.GetJournalStateRequestProto)
9674        }
9675    
9676        static {
9677          defaultInstance = new GetJournalStateRequestProto(true);
9678          defaultInstance.initFields();
9679        }
9680    
9681        // @@protoc_insertion_point(class_scope:hadoop.hdfs.GetJournalStateRequestProto)
9682      }
9683    
9684      public interface GetJournalStateResponseProtoOrBuilder
9685          extends com.google.protobuf.MessageOrBuilder {
9686    
9687        // required uint64 lastPromisedEpoch = 1;
9688        /**
9689         * <code>required uint64 lastPromisedEpoch = 1;</code>
9690         */
9691        boolean hasLastPromisedEpoch();
9692        /**
9693         * <code>required uint64 lastPromisedEpoch = 1;</code>
9694         */
9695        long getLastPromisedEpoch();
9696    
9697        // required uint32 httpPort = 2;
9698        /**
9699         * <code>required uint32 httpPort = 2;</code>
9700         */
9701        boolean hasHttpPort();
9702        /**
9703         * <code>required uint32 httpPort = 2;</code>
9704         */
9705        int getHttpPort();
9706      }
9707      /**
9708       * Protobuf type {@code hadoop.hdfs.GetJournalStateResponseProto}
9709       */
9710      public static final class GetJournalStateResponseProto extends
9711          com.google.protobuf.GeneratedMessage
9712          implements GetJournalStateResponseProtoOrBuilder {
9713        // Use GetJournalStateResponseProto.newBuilder() to construct.
9714        private GetJournalStateResponseProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
9715          super(builder);
9716          this.unknownFields = builder.getUnknownFields();
9717        }
9718        private GetJournalStateResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
9719    
9720        private static final GetJournalStateResponseProto defaultInstance;
9721        public static GetJournalStateResponseProto getDefaultInstance() {
9722          return defaultInstance;
9723        }
9724    
9725        public GetJournalStateResponseProto getDefaultInstanceForType() {
9726          return defaultInstance;
9727        }
9728    
9729        private final com.google.protobuf.UnknownFieldSet unknownFields;
9730        @java.lang.Override
9731        public final com.google.protobuf.UnknownFieldSet
9732            getUnknownFields() {
9733          return this.unknownFields;
9734        }
9735        private GetJournalStateResponseProto(
9736            com.google.protobuf.CodedInputStream input,
9737            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
9738            throws com.google.protobuf.InvalidProtocolBufferException {
9739          initFields();
9740          int mutable_bitField0_ = 0;
9741          com.google.protobuf.UnknownFieldSet.Builder unknownFields =
9742              com.google.protobuf.UnknownFieldSet.newBuilder();
9743          try {
9744            boolean done = false;
9745            while (!done) {
9746              int tag = input.readTag();
9747              switch (tag) {
9748                case 0:
9749                  done = true;
9750                  break;
9751                default: {
9752                  if (!parseUnknownField(input, unknownFields,
9753                                         extensionRegistry, tag)) {
9754                    done = true;
9755                  }
9756                  break;
9757                }
9758                case 8: {
9759                  bitField0_ |= 0x00000001;
9760                  lastPromisedEpoch_ = input.readUInt64();
9761                  break;
9762                }
9763                case 16: {
9764                  bitField0_ |= 0x00000002;
9765                  httpPort_ = input.readUInt32();
9766                  break;
9767                }
9768              }
9769            }
9770          } catch (com.google.protobuf.InvalidProtocolBufferException e) {
9771            throw e.setUnfinishedMessage(this);
9772          } catch (java.io.IOException e) {
9773            throw new com.google.protobuf.InvalidProtocolBufferException(
9774                e.getMessage()).setUnfinishedMessage(this);
9775          } finally {
9776            this.unknownFields = unknownFields.build();
9777            makeExtensionsImmutable();
9778          }
9779        }
9780        public static final com.google.protobuf.Descriptors.Descriptor
9781            getDescriptor() {
9782          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_GetJournalStateResponseProto_descriptor;
9783        }
9784    
9785        protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
9786            internalGetFieldAccessorTable() {
9787          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_GetJournalStateResponseProto_fieldAccessorTable
9788              .ensureFieldAccessorsInitialized(
9789                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto.Builder.class);
9790        }
9791    
9792        public static com.google.protobuf.Parser<GetJournalStateResponseProto> PARSER =
9793            new com.google.protobuf.AbstractParser<GetJournalStateResponseProto>() {
9794          public GetJournalStateResponseProto parsePartialFrom(
9795              com.google.protobuf.CodedInputStream input,
9796              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
9797              throws com.google.protobuf.InvalidProtocolBufferException {
9798            return new GetJournalStateResponseProto(input, extensionRegistry);
9799          }
9800        };
9801    
9802        @java.lang.Override
9803        public com.google.protobuf.Parser<GetJournalStateResponseProto> getParserForType() {
9804          return PARSER;
9805        }
9806    
9807        private int bitField0_;
9808        // required uint64 lastPromisedEpoch = 1;
9809        public static final int LASTPROMISEDEPOCH_FIELD_NUMBER = 1;
9810        private long lastPromisedEpoch_;
9811        /**
9812         * <code>required uint64 lastPromisedEpoch = 1;</code>
9813         */
9814        public boolean hasLastPromisedEpoch() {
9815          return ((bitField0_ & 0x00000001) == 0x00000001);
9816        }
9817        /**
9818         * <code>required uint64 lastPromisedEpoch = 1;</code>
9819         */
9820        public long getLastPromisedEpoch() {
9821          return lastPromisedEpoch_;
9822        }
9823    
9824        // required uint32 httpPort = 2;
9825        public static final int HTTPPORT_FIELD_NUMBER = 2;
9826        private int httpPort_;
9827        /**
9828         * <code>required uint32 httpPort = 2;</code>
9829         */
9830        public boolean hasHttpPort() {
9831          return ((bitField0_ & 0x00000002) == 0x00000002);
9832        }
9833        /**
9834         * <code>required uint32 httpPort = 2;</code>
9835         */
9836        public int getHttpPort() {
9837          return httpPort_;
9838        }
9839    
9840        private void initFields() {
9841          lastPromisedEpoch_ = 0L;
9842          httpPort_ = 0;
9843        }
9844        private byte memoizedIsInitialized = -1;
9845        public final boolean isInitialized() {
9846          byte isInitialized = memoizedIsInitialized;
9847          if (isInitialized != -1) return isInitialized == 1;
9848    
9849          if (!hasLastPromisedEpoch()) {
9850            memoizedIsInitialized = 0;
9851            return false;
9852          }
9853          if (!hasHttpPort()) {
9854            memoizedIsInitialized = 0;
9855            return false;
9856          }
9857          memoizedIsInitialized = 1;
9858          return true;
9859        }
9860    
9861        public void writeTo(com.google.protobuf.CodedOutputStream output)
9862                            throws java.io.IOException {
9863          getSerializedSize();
9864          if (((bitField0_ & 0x00000001) == 0x00000001)) {
9865            output.writeUInt64(1, lastPromisedEpoch_);
9866          }
9867          if (((bitField0_ & 0x00000002) == 0x00000002)) {
9868            output.writeUInt32(2, httpPort_);
9869          }
9870          getUnknownFields().writeTo(output);
9871        }
9872    
9873        private int memoizedSerializedSize = -1;
9874        public int getSerializedSize() {
9875          int size = memoizedSerializedSize;
9876          if (size != -1) return size;
9877    
9878          size = 0;
9879          if (((bitField0_ & 0x00000001) == 0x00000001)) {
9880            size += com.google.protobuf.CodedOutputStream
9881              .computeUInt64Size(1, lastPromisedEpoch_);
9882          }
9883          if (((bitField0_ & 0x00000002) == 0x00000002)) {
9884            size += com.google.protobuf.CodedOutputStream
9885              .computeUInt32Size(2, httpPort_);
9886          }
9887          size += getUnknownFields().getSerializedSize();
9888          memoizedSerializedSize = size;
9889          return size;
9890        }
9891    
9892        private static final long serialVersionUID = 0L;
9893        @java.lang.Override
9894        protected java.lang.Object writeReplace()
9895            throws java.io.ObjectStreamException {
9896          return super.writeReplace();
9897        }
9898    
9899        @java.lang.Override
9900        public boolean equals(final java.lang.Object obj) {
9901          if (obj == this) {
9902           return true;
9903          }
9904          if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto)) {
9905            return super.equals(obj);
9906          }
9907          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto) obj;
9908    
9909          boolean result = true;
9910          result = result && (hasLastPromisedEpoch() == other.hasLastPromisedEpoch());
9911          if (hasLastPromisedEpoch()) {
9912            result = result && (getLastPromisedEpoch()
9913                == other.getLastPromisedEpoch());
9914          }
9915          result = result && (hasHttpPort() == other.hasHttpPort());
9916          if (hasHttpPort()) {
9917            result = result && (getHttpPort()
9918                == other.getHttpPort());
9919          }
9920          result = result &&
9921              getUnknownFields().equals(other.getUnknownFields());
9922          return result;
9923        }
9924    
9925        private int memoizedHashCode = 0;
9926        @java.lang.Override
9927        public int hashCode() {
9928          if (memoizedHashCode != 0) {
9929            return memoizedHashCode;
9930          }
9931          int hash = 41;
9932          hash = (19 * hash) + getDescriptorForType().hashCode();
9933          if (hasLastPromisedEpoch()) {
9934            hash = (37 * hash) + LASTPROMISEDEPOCH_FIELD_NUMBER;
9935            hash = (53 * hash) + hashLong(getLastPromisedEpoch());
9936          }
9937          if (hasHttpPort()) {
9938            hash = (37 * hash) + HTTPPORT_FIELD_NUMBER;
9939            hash = (53 * hash) + getHttpPort();
9940          }
9941          hash = (29 * hash) + getUnknownFields().hashCode();
9942          memoizedHashCode = hash;
9943          return hash;
9944        }
9945    
9946        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto parseFrom(
9947            com.google.protobuf.ByteString data)
9948            throws com.google.protobuf.InvalidProtocolBufferException {
9949          return PARSER.parseFrom(data);
9950        }
9951        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto parseFrom(
9952            com.google.protobuf.ByteString data,
9953            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
9954            throws com.google.protobuf.InvalidProtocolBufferException {
9955          return PARSER.parseFrom(data, extensionRegistry);
9956        }
9957        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto parseFrom(byte[] data)
9958            throws com.google.protobuf.InvalidProtocolBufferException {
9959          return PARSER.parseFrom(data);
9960        }
9961        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto parseFrom(
9962            byte[] data,
9963            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
9964            throws com.google.protobuf.InvalidProtocolBufferException {
9965          return PARSER.parseFrom(data, extensionRegistry);
9966        }
9967        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto parseFrom(java.io.InputStream input)
9968            throws java.io.IOException {
9969          return PARSER.parseFrom(input);
9970        }
9971        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto parseFrom(
9972            java.io.InputStream input,
9973            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
9974            throws java.io.IOException {
9975          return PARSER.parseFrom(input, extensionRegistry);
9976        }
9977        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto parseDelimitedFrom(java.io.InputStream input)
9978            throws java.io.IOException {
9979          return PARSER.parseDelimitedFrom(input);
9980        }
9981        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto parseDelimitedFrom(
9982            java.io.InputStream input,
9983            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
9984            throws java.io.IOException {
9985          return PARSER.parseDelimitedFrom(input, extensionRegistry);
9986        }
9987        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto parseFrom(
9988            com.google.protobuf.CodedInputStream input)
9989            throws java.io.IOException {
9990          return PARSER.parseFrom(input);
9991        }
9992        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto parseFrom(
9993            com.google.protobuf.CodedInputStream input,
9994            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
9995            throws java.io.IOException {
9996          return PARSER.parseFrom(input, extensionRegistry);
9997        }
9998    
9999        public static Builder newBuilder() { return Builder.create(); }
10000        public Builder newBuilderForType() { return newBuilder(); }
10001        public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto prototype) {
10002          return newBuilder().mergeFrom(prototype);
10003        }
10004        public Builder toBuilder() { return newBuilder(this); }
10005    
10006        @java.lang.Override
10007        protected Builder newBuilderForType(
10008            com.google.protobuf.GeneratedMessage.BuilderParent parent) {
10009          Builder builder = new Builder(parent);
10010          return builder;
10011        }
10012        /**
10013         * Protobuf type {@code hadoop.hdfs.GetJournalStateResponseProto}
10014         */
10015        public static final class Builder extends
10016            com.google.protobuf.GeneratedMessage.Builder<Builder>
10017           implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProtoOrBuilder {
10018          public static final com.google.protobuf.Descriptors.Descriptor
10019              getDescriptor() {
10020            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_GetJournalStateResponseProto_descriptor;
10021          }
10022    
10023          protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
10024              internalGetFieldAccessorTable() {
10025            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_GetJournalStateResponseProto_fieldAccessorTable
10026                .ensureFieldAccessorsInitialized(
10027                    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto.Builder.class);
10028          }
10029    
10030          // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto.newBuilder()
10031          private Builder() {
10032            maybeForceBuilderInitialization();
10033          }
10034    
10035          private Builder(
10036              com.google.protobuf.GeneratedMessage.BuilderParent parent) {
10037            super(parent);
10038            maybeForceBuilderInitialization();
10039          }
10040          private void maybeForceBuilderInitialization() {
10041            if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
10042            }
10043          }
10044          private static Builder create() {
10045            return new Builder();
10046          }
10047    
10048          public Builder clear() {
10049            super.clear();
10050            lastPromisedEpoch_ = 0L;
10051            bitField0_ = (bitField0_ & ~0x00000001);
10052            httpPort_ = 0;
10053            bitField0_ = (bitField0_ & ~0x00000002);
10054            return this;
10055          }
10056    
10057          public Builder clone() {
10058            return create().mergeFrom(buildPartial());
10059          }
10060    
10061          public com.google.protobuf.Descriptors.Descriptor
10062              getDescriptorForType() {
10063            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_GetJournalStateResponseProto_descriptor;
10064          }
10065    
10066          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto getDefaultInstanceForType() {
10067            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto.getDefaultInstance();
10068          }
10069    
10070          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto build() {
10071            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto result = buildPartial();
10072            if (!result.isInitialized()) {
10073              throw newUninitializedMessageException(result);
10074            }
10075            return result;
10076          }
10077    
10078          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto buildPartial() {
10079            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto(this);
10080            int from_bitField0_ = bitField0_;
10081            int to_bitField0_ = 0;
10082            if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
10083              to_bitField0_ |= 0x00000001;
10084            }
10085            result.lastPromisedEpoch_ = lastPromisedEpoch_;
10086            if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
10087              to_bitField0_ |= 0x00000002;
10088            }
10089            result.httpPort_ = httpPort_;
10090            result.bitField0_ = to_bitField0_;
10091            onBuilt();
10092            return result;
10093          }
10094    
10095          public Builder mergeFrom(com.google.protobuf.Message other) {
10096            if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto) {
10097              return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto)other);
10098            } else {
10099              super.mergeFrom(other);
10100              return this;
10101            }
10102          }
10103    
10104          public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto other) {
10105            if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto.getDefaultInstance()) return this;
10106            if (other.hasLastPromisedEpoch()) {
10107              setLastPromisedEpoch(other.getLastPromisedEpoch());
10108            }
10109            if (other.hasHttpPort()) {
10110              setHttpPort(other.getHttpPort());
10111            }
10112            this.mergeUnknownFields(other.getUnknownFields());
10113            return this;
10114          }
10115    
10116          public final boolean isInitialized() {
10117            if (!hasLastPromisedEpoch()) {
10118              
10119              return false;
10120            }
10121            if (!hasHttpPort()) {
10122              
10123              return false;
10124            }
10125            return true;
10126          }
10127    
10128          public Builder mergeFrom(
10129              com.google.protobuf.CodedInputStream input,
10130              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
10131              throws java.io.IOException {
10132            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto parsedMessage = null;
10133            try {
10134              parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
10135            } catch (com.google.protobuf.InvalidProtocolBufferException e) {
10136              parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto) e.getUnfinishedMessage();
10137              throw e;
10138            } finally {
10139              if (parsedMessage != null) {
10140                mergeFrom(parsedMessage);
10141              }
10142            }
10143            return this;
10144          }
10145          private int bitField0_;
10146    
10147          // required uint64 lastPromisedEpoch = 1;
10148          private long lastPromisedEpoch_ ;
10149          /**
10150           * <code>required uint64 lastPromisedEpoch = 1;</code>
10151           */
10152          public boolean hasLastPromisedEpoch() {
10153            return ((bitField0_ & 0x00000001) == 0x00000001);
10154          }
10155          /**
10156           * <code>required uint64 lastPromisedEpoch = 1;</code>
10157           */
10158          public long getLastPromisedEpoch() {
10159            return lastPromisedEpoch_;
10160          }
10161          /**
10162           * <code>required uint64 lastPromisedEpoch = 1;</code>
10163           */
10164          public Builder setLastPromisedEpoch(long value) {
10165            bitField0_ |= 0x00000001;
10166            lastPromisedEpoch_ = value;
10167            onChanged();
10168            return this;
10169          }
10170          /**
10171           * <code>required uint64 lastPromisedEpoch = 1;</code>
10172           */
10173          public Builder clearLastPromisedEpoch() {
10174            bitField0_ = (bitField0_ & ~0x00000001);
10175            lastPromisedEpoch_ = 0L;
10176            onChanged();
10177            return this;
10178          }
10179    
10180          // required uint32 httpPort = 2;
10181          private int httpPort_ ;
10182          /**
10183           * <code>required uint32 httpPort = 2;</code>
10184           */
10185          public boolean hasHttpPort() {
10186            return ((bitField0_ & 0x00000002) == 0x00000002);
10187          }
10188          /**
10189           * <code>required uint32 httpPort = 2;</code>
10190           */
10191          public int getHttpPort() {
10192            return httpPort_;
10193          }
10194          /**
10195           * <code>required uint32 httpPort = 2;</code>
10196           */
10197          public Builder setHttpPort(int value) {
10198            bitField0_ |= 0x00000002;
10199            httpPort_ = value;
10200            onChanged();
10201            return this;
10202          }
10203          /**
10204           * <code>required uint32 httpPort = 2;</code>
10205           */
10206          public Builder clearHttpPort() {
10207            bitField0_ = (bitField0_ & ~0x00000002);
10208            httpPort_ = 0;
10209            onChanged();
10210            return this;
10211          }
10212    
10213          // @@protoc_insertion_point(builder_scope:hadoop.hdfs.GetJournalStateResponseProto)
10214        }
10215    
10216        static {
10217          defaultInstance = new GetJournalStateResponseProto(true);
10218          defaultInstance.initFields();
10219        }
10220    
10221        // @@protoc_insertion_point(class_scope:hadoop.hdfs.GetJournalStateResponseProto)
10222      }
10223    
10224      public interface FormatRequestProtoOrBuilder
10225          extends com.google.protobuf.MessageOrBuilder {
10226    
10227        // required .hadoop.hdfs.JournalIdProto jid = 1;
10228        /**
10229         * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
10230         */
10231        boolean hasJid();
10232        /**
10233         * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
10234         */
10235        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid();
10236        /**
10237         * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
10238         */
10239        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder();
10240    
10241        // required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;
10242        /**
10243         * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
10244         */
10245        boolean hasNsInfo();
10246        /**
10247         * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
10248         */
10249        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto getNsInfo();
10250        /**
10251         * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
10252         */
10253        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProtoOrBuilder getNsInfoOrBuilder();
10254      }
10255      /**
10256       * Protobuf type {@code hadoop.hdfs.FormatRequestProto}
10257       *
10258       * <pre>
10259       **
10260       * format()
10261       * </pre>
10262       */
10263      public static final class FormatRequestProto extends
10264          com.google.protobuf.GeneratedMessage
10265          implements FormatRequestProtoOrBuilder {
10266        // Use FormatRequestProto.newBuilder() to construct.
10267        private FormatRequestProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
10268          super(builder);
10269          this.unknownFields = builder.getUnknownFields();
10270        }
10271        private FormatRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
10272    
10273        private static final FormatRequestProto defaultInstance;
10274        public static FormatRequestProto getDefaultInstance() {
10275          return defaultInstance;
10276        }
10277    
10278        public FormatRequestProto getDefaultInstanceForType() {
10279          return defaultInstance;
10280        }
10281    
10282        private final com.google.protobuf.UnknownFieldSet unknownFields;
10283        @java.lang.Override
10284        public final com.google.protobuf.UnknownFieldSet
10285            getUnknownFields() {
10286          return this.unknownFields;
10287        }
10288        private FormatRequestProto(
10289            com.google.protobuf.CodedInputStream input,
10290            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
10291            throws com.google.protobuf.InvalidProtocolBufferException {
10292          initFields();
10293          int mutable_bitField0_ = 0;
10294          com.google.protobuf.UnknownFieldSet.Builder unknownFields =
10295              com.google.protobuf.UnknownFieldSet.newBuilder();
10296          try {
10297            boolean done = false;
10298            while (!done) {
10299              int tag = input.readTag();
10300              switch (tag) {
10301                case 0:
10302                  done = true;
10303                  break;
10304                default: {
10305                  if (!parseUnknownField(input, unknownFields,
10306                                         extensionRegistry, tag)) {
10307                    done = true;
10308                  }
10309                  break;
10310                }
10311                case 10: {
10312                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder subBuilder = null;
10313                  if (((bitField0_ & 0x00000001) == 0x00000001)) {
10314                    subBuilder = jid_.toBuilder();
10315                  }
10316                  jid_ = input.readMessage(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.PARSER, extensionRegistry);
10317                  if (subBuilder != null) {
10318                    subBuilder.mergeFrom(jid_);
10319                    jid_ = subBuilder.buildPartial();
10320                  }
10321                  bitField0_ |= 0x00000001;
10322                  break;
10323                }
10324                case 18: {
10325                  org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.Builder subBuilder = null;
10326                  if (((bitField0_ & 0x00000002) == 0x00000002)) {
10327                    subBuilder = nsInfo_.toBuilder();
10328                  }
10329                  nsInfo_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.PARSER, extensionRegistry);
10330                  if (subBuilder != null) {
10331                    subBuilder.mergeFrom(nsInfo_);
10332                    nsInfo_ = subBuilder.buildPartial();
10333                  }
10334                  bitField0_ |= 0x00000002;
10335                  break;
10336                }
10337              }
10338            }
10339          } catch (com.google.protobuf.InvalidProtocolBufferException e) {
10340            throw e.setUnfinishedMessage(this);
10341          } catch (java.io.IOException e) {
10342            throw new com.google.protobuf.InvalidProtocolBufferException(
10343                e.getMessage()).setUnfinishedMessage(this);
10344          } finally {
10345            this.unknownFields = unknownFields.build();
10346            makeExtensionsImmutable();
10347          }
10348        }
10349        public static final com.google.protobuf.Descriptors.Descriptor
10350            getDescriptor() {
10351          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_FormatRequestProto_descriptor;
10352        }
10353    
10354        protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
10355            internalGetFieldAccessorTable() {
10356          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_FormatRequestProto_fieldAccessorTable
10357              .ensureFieldAccessorsInitialized(
10358                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto.Builder.class);
10359        }
10360    
10361        public static com.google.protobuf.Parser<FormatRequestProto> PARSER =
10362            new com.google.protobuf.AbstractParser<FormatRequestProto>() {
10363          public FormatRequestProto parsePartialFrom(
10364              com.google.protobuf.CodedInputStream input,
10365              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
10366              throws com.google.protobuf.InvalidProtocolBufferException {
10367            return new FormatRequestProto(input, extensionRegistry);
10368          }
10369        };
10370    
10371        @java.lang.Override
10372        public com.google.protobuf.Parser<FormatRequestProto> getParserForType() {
10373          return PARSER;
10374        }
10375    
10376        private int bitField0_;
10377        // required .hadoop.hdfs.JournalIdProto jid = 1;
10378        public static final int JID_FIELD_NUMBER = 1;
10379        private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto jid_;
10380        /**
10381         * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
10382         */
10383        public boolean hasJid() {
10384          return ((bitField0_ & 0x00000001) == 0x00000001);
10385        }
10386        /**
10387         * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
10388         */
10389        public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid() {
10390          return jid_;
10391        }
10392        /**
10393         * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
10394         */
10395        public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder() {
10396          return jid_;
10397        }
10398    
10399        // required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;
10400        public static final int NSINFO_FIELD_NUMBER = 2;
10401        private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto nsInfo_;
10402        /**
10403         * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
10404         */
10405        public boolean hasNsInfo() {
10406          return ((bitField0_ & 0x00000002) == 0x00000002);
10407        }
10408        /**
10409         * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
10410         */
10411        public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto getNsInfo() {
10412          return nsInfo_;
10413        }
10414        /**
10415         * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
10416         */
10417        public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProtoOrBuilder getNsInfoOrBuilder() {
10418          return nsInfo_;
10419        }
10420    
10421        private void initFields() {
10422          jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
10423          nsInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.getDefaultInstance();
10424        }
10425        private byte memoizedIsInitialized = -1;
10426        public final boolean isInitialized() {
10427          byte isInitialized = memoizedIsInitialized;
10428          if (isInitialized != -1) return isInitialized == 1;
10429    
10430          if (!hasJid()) {
10431            memoizedIsInitialized = 0;
10432            return false;
10433          }
10434          if (!hasNsInfo()) {
10435            memoizedIsInitialized = 0;
10436            return false;
10437          }
10438          if (!getJid().isInitialized()) {
10439            memoizedIsInitialized = 0;
10440            return false;
10441          }
10442          if (!getNsInfo().isInitialized()) {
10443            memoizedIsInitialized = 0;
10444            return false;
10445          }
10446          memoizedIsInitialized = 1;
10447          return true;
10448        }
10449    
10450        public void writeTo(com.google.protobuf.CodedOutputStream output)
10451                            throws java.io.IOException {
10452          getSerializedSize();
10453          if (((bitField0_ & 0x00000001) == 0x00000001)) {
10454            output.writeMessage(1, jid_);
10455          }
10456          if (((bitField0_ & 0x00000002) == 0x00000002)) {
10457            output.writeMessage(2, nsInfo_);
10458          }
10459          getUnknownFields().writeTo(output);
10460        }
10461    
10462        private int memoizedSerializedSize = -1;
10463        public int getSerializedSize() {
10464          int size = memoizedSerializedSize;
10465          if (size != -1) return size;
10466    
10467          size = 0;
10468          if (((bitField0_ & 0x00000001) == 0x00000001)) {
10469            size += com.google.protobuf.CodedOutputStream
10470              .computeMessageSize(1, jid_);
10471          }
10472          if (((bitField0_ & 0x00000002) == 0x00000002)) {
10473            size += com.google.protobuf.CodedOutputStream
10474              .computeMessageSize(2, nsInfo_);
10475          }
10476          size += getUnknownFields().getSerializedSize();
10477          memoizedSerializedSize = size;
10478          return size;
10479        }
10480    
10481        private static final long serialVersionUID = 0L;
10482        @java.lang.Override
10483        protected java.lang.Object writeReplace()
10484            throws java.io.ObjectStreamException {
10485          return super.writeReplace();
10486        }
10487    
10488        @java.lang.Override
10489        public boolean equals(final java.lang.Object obj) {
10490          if (obj == this) {
10491           return true;
10492          }
10493          if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto)) {
10494            return super.equals(obj);
10495          }
10496          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto) obj;
10497    
10498          boolean result = true;
10499          result = result && (hasJid() == other.hasJid());
10500          if (hasJid()) {
10501            result = result && getJid()
10502                .equals(other.getJid());
10503          }
10504          result = result && (hasNsInfo() == other.hasNsInfo());
10505          if (hasNsInfo()) {
10506            result = result && getNsInfo()
10507                .equals(other.getNsInfo());
10508          }
10509          result = result &&
10510              getUnknownFields().equals(other.getUnknownFields());
10511          return result;
10512        }
10513    
10514        private int memoizedHashCode = 0;
10515        @java.lang.Override
10516        public int hashCode() {
10517          if (memoizedHashCode != 0) {
10518            return memoizedHashCode;
10519          }
10520          int hash = 41;
10521          hash = (19 * hash) + getDescriptorForType().hashCode();
10522          if (hasJid()) {
10523            hash = (37 * hash) + JID_FIELD_NUMBER;
10524            hash = (53 * hash) + getJid().hashCode();
10525          }
10526          if (hasNsInfo()) {
10527            hash = (37 * hash) + NSINFO_FIELD_NUMBER;
10528            hash = (53 * hash) + getNsInfo().hashCode();
10529          }
10530          hash = (29 * hash) + getUnknownFields().hashCode();
10531          memoizedHashCode = hash;
10532          return hash;
10533        }
10534    
10535        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto parseFrom(
10536            com.google.protobuf.ByteString data)
10537            throws com.google.protobuf.InvalidProtocolBufferException {
10538          return PARSER.parseFrom(data);
10539        }
10540        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto parseFrom(
10541            com.google.protobuf.ByteString data,
10542            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
10543            throws com.google.protobuf.InvalidProtocolBufferException {
10544          return PARSER.parseFrom(data, extensionRegistry);
10545        }
10546        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto parseFrom(byte[] data)
10547            throws com.google.protobuf.InvalidProtocolBufferException {
10548          return PARSER.parseFrom(data);
10549        }
10550        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto parseFrom(
10551            byte[] data,
10552            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
10553            throws com.google.protobuf.InvalidProtocolBufferException {
10554          return PARSER.parseFrom(data, extensionRegistry);
10555        }
10556        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto parseFrom(java.io.InputStream input)
10557            throws java.io.IOException {
10558          return PARSER.parseFrom(input);
10559        }
10560        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto parseFrom(
10561            java.io.InputStream input,
10562            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
10563            throws java.io.IOException {
10564          return PARSER.parseFrom(input, extensionRegistry);
10565        }
10566        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto parseDelimitedFrom(java.io.InputStream input)
10567            throws java.io.IOException {
10568          return PARSER.parseDelimitedFrom(input);
10569        }
10570        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto parseDelimitedFrom(
10571            java.io.InputStream input,
10572            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
10573            throws java.io.IOException {
10574          return PARSER.parseDelimitedFrom(input, extensionRegistry);
10575        }
10576        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto parseFrom(
10577            com.google.protobuf.CodedInputStream input)
10578            throws java.io.IOException {
10579          return PARSER.parseFrom(input);
10580        }
10581        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto parseFrom(
10582            com.google.protobuf.CodedInputStream input,
10583            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
10584            throws java.io.IOException {
10585          return PARSER.parseFrom(input, extensionRegistry);
10586        }
10587    
10588        public static Builder newBuilder() { return Builder.create(); }
10589        public Builder newBuilderForType() { return newBuilder(); }
10590        public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto prototype) {
10591          return newBuilder().mergeFrom(prototype);
10592        }
10593        public Builder toBuilder() { return newBuilder(this); }
10594    
10595        @java.lang.Override
10596        protected Builder newBuilderForType(
10597            com.google.protobuf.GeneratedMessage.BuilderParent parent) {
10598          Builder builder = new Builder(parent);
10599          return builder;
10600        }
10601        /**
10602         * Protobuf type {@code hadoop.hdfs.FormatRequestProto}
10603         *
10604         * <pre>
10605         **
10606         * format()
10607         * </pre>
10608         */
10609        public static final class Builder extends
10610            com.google.protobuf.GeneratedMessage.Builder<Builder>
10611           implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProtoOrBuilder {
10612          public static final com.google.protobuf.Descriptors.Descriptor
10613              getDescriptor() {
10614            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_FormatRequestProto_descriptor;
10615          }
10616    
10617          protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
10618              internalGetFieldAccessorTable() {
10619            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_FormatRequestProto_fieldAccessorTable
10620                .ensureFieldAccessorsInitialized(
10621                    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto.Builder.class);
10622          }
10623    
10624          // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto.newBuilder()
10625          private Builder() {
10626            maybeForceBuilderInitialization();
10627          }
10628    
10629          private Builder(
10630              com.google.protobuf.GeneratedMessage.BuilderParent parent) {
10631            super(parent);
10632            maybeForceBuilderInitialization();
10633          }
10634          private void maybeForceBuilderInitialization() {
10635            if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
10636              getJidFieldBuilder();
10637              getNsInfoFieldBuilder();
10638            }
10639          }
10640          private static Builder create() {
10641            return new Builder();
10642          }
10643    
10644          public Builder clear() {
10645            super.clear();
10646            if (jidBuilder_ == null) {
10647              jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
10648            } else {
10649              jidBuilder_.clear();
10650            }
10651            bitField0_ = (bitField0_ & ~0x00000001);
10652            if (nsInfoBuilder_ == null) {
10653              nsInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.getDefaultInstance();
10654            } else {
10655              nsInfoBuilder_.clear();
10656            }
10657            bitField0_ = (bitField0_ & ~0x00000002);
10658            return this;
10659          }
10660    
10661          public Builder clone() {
10662            return create().mergeFrom(buildPartial());
10663          }
10664    
10665          public com.google.protobuf.Descriptors.Descriptor
10666              getDescriptorForType() {
10667            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_FormatRequestProto_descriptor;
10668          }
10669    
10670          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto getDefaultInstanceForType() {
10671            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto.getDefaultInstance();
10672          }
10673    
10674          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto build() {
10675            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto result = buildPartial();
10676            if (!result.isInitialized()) {
10677              throw newUninitializedMessageException(result);
10678            }
10679            return result;
10680          }
10681    
10682          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto buildPartial() {
10683            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto(this);
10684            int from_bitField0_ = bitField0_;
10685            int to_bitField0_ = 0;
10686            if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
10687              to_bitField0_ |= 0x00000001;
10688            }
10689            if (jidBuilder_ == null) {
10690              result.jid_ = jid_;
10691            } else {
10692              result.jid_ = jidBuilder_.build();
10693            }
10694            if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
10695              to_bitField0_ |= 0x00000002;
10696            }
10697            if (nsInfoBuilder_ == null) {
10698              result.nsInfo_ = nsInfo_;
10699            } else {
10700              result.nsInfo_ = nsInfoBuilder_.build();
10701            }
10702            result.bitField0_ = to_bitField0_;
10703            onBuilt();
10704            return result;
10705          }
10706    
10707          public Builder mergeFrom(com.google.protobuf.Message other) {
10708            if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto) {
10709              return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto)other);
10710            } else {
10711              super.mergeFrom(other);
10712              return this;
10713            }
10714          }
10715    
10716          public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto other) {
10717            if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto.getDefaultInstance()) return this;
10718            if (other.hasJid()) {
10719              mergeJid(other.getJid());
10720            }
10721            if (other.hasNsInfo()) {
10722              mergeNsInfo(other.getNsInfo());
10723            }
10724            this.mergeUnknownFields(other.getUnknownFields());
10725            return this;
10726          }
10727    
10728          public final boolean isInitialized() {
10729            if (!hasJid()) {
10730              
10731              return false;
10732            }
10733            if (!hasNsInfo()) {
10734              
10735              return false;
10736            }
10737            if (!getJid().isInitialized()) {
10738              
10739              return false;
10740            }
10741            if (!getNsInfo().isInitialized()) {
10742              
10743              return false;
10744            }
10745            return true;
10746          }
10747    
10748          public Builder mergeFrom(
10749              com.google.protobuf.CodedInputStream input,
10750              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
10751              throws java.io.IOException {
10752            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto parsedMessage = null;
10753            try {
10754              parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
10755            } catch (com.google.protobuf.InvalidProtocolBufferException e) {
10756              parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto) e.getUnfinishedMessage();
10757              throw e;
10758            } finally {
10759              if (parsedMessage != null) {
10760                mergeFrom(parsedMessage);
10761              }
10762            }
10763            return this;
10764          }
10765          private int bitField0_;
10766    
10767          // required .hadoop.hdfs.JournalIdProto jid = 1;
10768          private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
10769          private com.google.protobuf.SingleFieldBuilder<
10770              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder> jidBuilder_;
10771          /**
10772           * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
10773           */
10774          public boolean hasJid() {
10775            return ((bitField0_ & 0x00000001) == 0x00000001);
10776          }
10777          /**
10778           * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
10779           */
10780          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid() {
10781            if (jidBuilder_ == null) {
10782              return jid_;
10783            } else {
10784              return jidBuilder_.getMessage();
10785            }
10786          }
10787          /**
10788           * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
10789           */
10790          public Builder setJid(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto value) {
10791            if (jidBuilder_ == null) {
10792              if (value == null) {
10793                throw new NullPointerException();
10794              }
10795              jid_ = value;
10796              onChanged();
10797            } else {
10798              jidBuilder_.setMessage(value);
10799            }
10800            bitField0_ |= 0x00000001;
10801            return this;
10802          }
10803          /**
10804           * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
10805           */
10806          public Builder setJid(
10807              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder builderForValue) {
10808            if (jidBuilder_ == null) {
10809              jid_ = builderForValue.build();
10810              onChanged();
10811            } else {
10812              jidBuilder_.setMessage(builderForValue.build());
10813            }
10814            bitField0_ |= 0x00000001;
10815            return this;
10816          }
10817          /**
10818           * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
10819           */
10820          public Builder mergeJid(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto value) {
10821            if (jidBuilder_ == null) {
10822              if (((bitField0_ & 0x00000001) == 0x00000001) &&
10823                  jid_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance()) {
10824                jid_ =
10825                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.newBuilder(jid_).mergeFrom(value).buildPartial();
10826              } else {
10827                jid_ = value;
10828              }
10829              onChanged();
10830            } else {
10831              jidBuilder_.mergeFrom(value);
10832            }
10833            bitField0_ |= 0x00000001;
10834            return this;
10835          }
10836          /**
10837           * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
10838           */
10839          public Builder clearJid() {
10840            if (jidBuilder_ == null) {
10841              jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
10842              onChanged();
10843            } else {
10844              jidBuilder_.clear();
10845            }
10846            bitField0_ = (bitField0_ & ~0x00000001);
10847            return this;
10848          }
10849          /**
10850           * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
10851           */
10852          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder getJidBuilder() {
10853            bitField0_ |= 0x00000001;
10854            onChanged();
10855            return getJidFieldBuilder().getBuilder();
10856          }
10857          /**
10858           * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
10859           */
10860          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder() {
10861            if (jidBuilder_ != null) {
10862              return jidBuilder_.getMessageOrBuilder();
10863            } else {
10864              return jid_;
10865            }
10866          }
10867          /**
10868           * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
10869           */
10870          private com.google.protobuf.SingleFieldBuilder<
10871              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder> 
10872              getJidFieldBuilder() {
10873            if (jidBuilder_ == null) {
10874              jidBuilder_ = new com.google.protobuf.SingleFieldBuilder<
10875                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder>(
10876                      jid_,
10877                      getParentForChildren(),
10878                      isClean());
10879              jid_ = null;
10880            }
10881            return jidBuilder_;
10882          }
10883    
10884          // required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;
10885          private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto nsInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.getDefaultInstance();
10886          private com.google.protobuf.SingleFieldBuilder<
10887              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProtoOrBuilder> nsInfoBuilder_;
10888          /**
10889           * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
10890           */
10891          public boolean hasNsInfo() {
10892            return ((bitField0_ & 0x00000002) == 0x00000002);
10893          }
10894          /**
10895           * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
10896           */
10897          public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto getNsInfo() {
10898            if (nsInfoBuilder_ == null) {
10899              return nsInfo_;
10900            } else {
10901              return nsInfoBuilder_.getMessage();
10902            }
10903          }
10904          /**
10905           * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
10906           */
10907          public Builder setNsInfo(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto value) {
10908            if (nsInfoBuilder_ == null) {
10909              if (value == null) {
10910                throw new NullPointerException();
10911              }
10912              nsInfo_ = value;
10913              onChanged();
10914            } else {
10915              nsInfoBuilder_.setMessage(value);
10916            }
10917            bitField0_ |= 0x00000002;
10918            return this;
10919          }
10920          /**
10921           * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
10922           */
10923          public Builder setNsInfo(
10924              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.Builder builderForValue) {
10925            if (nsInfoBuilder_ == null) {
10926              nsInfo_ = builderForValue.build();
10927              onChanged();
10928            } else {
10929              nsInfoBuilder_.setMessage(builderForValue.build());
10930            }
10931            bitField0_ |= 0x00000002;
10932            return this;
10933          }
10934          /**
10935           * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
10936           */
10937          public Builder mergeNsInfo(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto value) {
10938            if (nsInfoBuilder_ == null) {
10939              if (((bitField0_ & 0x00000002) == 0x00000002) &&
10940                  nsInfo_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.getDefaultInstance()) {
10941                nsInfo_ =
10942                  org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.newBuilder(nsInfo_).mergeFrom(value).buildPartial();
10943              } else {
10944                nsInfo_ = value;
10945              }
10946              onChanged();
10947            } else {
10948              nsInfoBuilder_.mergeFrom(value);
10949            }
10950            bitField0_ |= 0x00000002;
10951            return this;
10952          }
10953          /**
10954           * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
10955           */
10956          public Builder clearNsInfo() {
10957            if (nsInfoBuilder_ == null) {
10958              nsInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.getDefaultInstance();
10959              onChanged();
10960            } else {
10961              nsInfoBuilder_.clear();
10962            }
10963            bitField0_ = (bitField0_ & ~0x00000002);
10964            return this;
10965          }
10966          /**
10967           * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
10968           */
10969          public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.Builder getNsInfoBuilder() {
10970            bitField0_ |= 0x00000002;
10971            onChanged();
10972            return getNsInfoFieldBuilder().getBuilder();
10973          }
10974          /**
10975           * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
10976           */
10977          public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProtoOrBuilder getNsInfoOrBuilder() {
10978            if (nsInfoBuilder_ != null) {
10979              return nsInfoBuilder_.getMessageOrBuilder();
10980            } else {
10981              return nsInfo_;
10982            }
10983          }
10984          /**
10985           * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
10986           */
10987          private com.google.protobuf.SingleFieldBuilder<
10988              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProtoOrBuilder> 
10989              getNsInfoFieldBuilder() {
10990            if (nsInfoBuilder_ == null) {
10991              nsInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder<
10992                  org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProtoOrBuilder>(
10993                      nsInfo_,
10994                      getParentForChildren(),
10995                      isClean());
10996              nsInfo_ = null;
10997            }
10998            return nsInfoBuilder_;
10999          }
11000    
11001          // @@protoc_insertion_point(builder_scope:hadoop.hdfs.FormatRequestProto)
11002        }
11003    
11004        static {
11005          defaultInstance = new FormatRequestProto(true);
11006          defaultInstance.initFields();
11007        }
11008    
11009        // @@protoc_insertion_point(class_scope:hadoop.hdfs.FormatRequestProto)
11010      }
11011    
11012      public interface FormatResponseProtoOrBuilder
11013          extends com.google.protobuf.MessageOrBuilder {
11014      }
11015      /**
11016       * Protobuf type {@code hadoop.hdfs.FormatResponseProto}
11017       */
11018      public static final class FormatResponseProto extends
11019          com.google.protobuf.GeneratedMessage
11020          implements FormatResponseProtoOrBuilder {
11021        // Use FormatResponseProto.newBuilder() to construct.
11022        private FormatResponseProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
11023          super(builder);
11024          this.unknownFields = builder.getUnknownFields();
11025        }
11026        private FormatResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
11027    
11028        private static final FormatResponseProto defaultInstance;
11029        public static FormatResponseProto getDefaultInstance() {
11030          return defaultInstance;
11031        }
11032    
11033        public FormatResponseProto getDefaultInstanceForType() {
11034          return defaultInstance;
11035        }
11036    
11037        private final com.google.protobuf.UnknownFieldSet unknownFields;
11038        @java.lang.Override
11039        public final com.google.protobuf.UnknownFieldSet
11040            getUnknownFields() {
11041          return this.unknownFields;
11042        }
11043        private FormatResponseProto(
11044            com.google.protobuf.CodedInputStream input,
11045            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
11046            throws com.google.protobuf.InvalidProtocolBufferException {
11047          initFields();
11048          com.google.protobuf.UnknownFieldSet.Builder unknownFields =
11049              com.google.protobuf.UnknownFieldSet.newBuilder();
11050          try {
11051            boolean done = false;
11052            while (!done) {
11053              int tag = input.readTag();
11054              switch (tag) {
11055                case 0:
11056                  done = true;
11057                  break;
11058                default: {
11059                  if (!parseUnknownField(input, unknownFields,
11060                                         extensionRegistry, tag)) {
11061                    done = true;
11062                  }
11063                  break;
11064                }
11065              }
11066            }
11067          } catch (com.google.protobuf.InvalidProtocolBufferException e) {
11068            throw e.setUnfinishedMessage(this);
11069          } catch (java.io.IOException e) {
11070            throw new com.google.protobuf.InvalidProtocolBufferException(
11071                e.getMessage()).setUnfinishedMessage(this);
11072          } finally {
11073            this.unknownFields = unknownFields.build();
11074            makeExtensionsImmutable();
11075          }
11076        }
11077        public static final com.google.protobuf.Descriptors.Descriptor
11078            getDescriptor() {
11079          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_FormatResponseProto_descriptor;
11080        }
11081    
11082        protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
11083            internalGetFieldAccessorTable() {
11084          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_FormatResponseProto_fieldAccessorTable
11085              .ensureFieldAccessorsInitialized(
11086                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto.Builder.class);
11087        }
11088    
11089        public static com.google.protobuf.Parser<FormatResponseProto> PARSER =
11090            new com.google.protobuf.AbstractParser<FormatResponseProto>() {
11091          public FormatResponseProto parsePartialFrom(
11092              com.google.protobuf.CodedInputStream input,
11093              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
11094              throws com.google.protobuf.InvalidProtocolBufferException {
11095            return new FormatResponseProto(input, extensionRegistry);
11096          }
11097        };
11098    
11099        @java.lang.Override
11100        public com.google.protobuf.Parser<FormatResponseProto> getParserForType() {
11101          return PARSER;
11102        }
11103    
11104        private void initFields() {
11105        }
11106        private byte memoizedIsInitialized = -1;
11107        public final boolean isInitialized() {
11108          byte isInitialized = memoizedIsInitialized;
11109          if (isInitialized != -1) return isInitialized == 1;
11110    
11111          memoizedIsInitialized = 1;
11112          return true;
11113        }
11114    
11115        public void writeTo(com.google.protobuf.CodedOutputStream output)
11116                            throws java.io.IOException {
11117          getSerializedSize();
11118          getUnknownFields().writeTo(output);
11119        }
11120    
11121        private int memoizedSerializedSize = -1;
11122        public int getSerializedSize() {
11123          int size = memoizedSerializedSize;
11124          if (size != -1) return size;
11125    
11126          size = 0;
11127          size += getUnknownFields().getSerializedSize();
11128          memoizedSerializedSize = size;
11129          return size;
11130        }
11131    
11132        private static final long serialVersionUID = 0L;
11133        @java.lang.Override
11134        protected java.lang.Object writeReplace()
11135            throws java.io.ObjectStreamException {
11136          return super.writeReplace();
11137        }
11138    
11139        @java.lang.Override
11140        public boolean equals(final java.lang.Object obj) {
11141          if (obj == this) {
11142           return true;
11143          }
11144          if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto)) {
11145            return super.equals(obj);
11146          }
11147          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto) obj;
11148    
11149          boolean result = true;
11150          result = result &&
11151              getUnknownFields().equals(other.getUnknownFields());
11152          return result;
11153        }
11154    
11155        private int memoizedHashCode = 0;
11156        @java.lang.Override
11157        public int hashCode() {
11158          if (memoizedHashCode != 0) {
11159            return memoizedHashCode;
11160          }
11161          int hash = 41;
11162          hash = (19 * hash) + getDescriptorForType().hashCode();
11163          hash = (29 * hash) + getUnknownFields().hashCode();
11164          memoizedHashCode = hash;
11165          return hash;
11166        }
11167    
11168        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto parseFrom(
11169            com.google.protobuf.ByteString data)
11170            throws com.google.protobuf.InvalidProtocolBufferException {
11171          return PARSER.parseFrom(data);
11172        }
11173        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto parseFrom(
11174            com.google.protobuf.ByteString data,
11175            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
11176            throws com.google.protobuf.InvalidProtocolBufferException {
11177          return PARSER.parseFrom(data, extensionRegistry);
11178        }
11179        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto parseFrom(byte[] data)
11180            throws com.google.protobuf.InvalidProtocolBufferException {
11181          return PARSER.parseFrom(data);
11182        }
11183        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto parseFrom(
11184            byte[] data,
11185            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
11186            throws com.google.protobuf.InvalidProtocolBufferException {
11187          return PARSER.parseFrom(data, extensionRegistry);
11188        }
11189        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto parseFrom(java.io.InputStream input)
11190            throws java.io.IOException {
11191          return PARSER.parseFrom(input);
11192        }
11193        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto parseFrom(
11194            java.io.InputStream input,
11195            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
11196            throws java.io.IOException {
11197          return PARSER.parseFrom(input, extensionRegistry);
11198        }
11199        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto parseDelimitedFrom(java.io.InputStream input)
11200            throws java.io.IOException {
11201          return PARSER.parseDelimitedFrom(input);
11202        }
11203        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto parseDelimitedFrom(
11204            java.io.InputStream input,
11205            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
11206            throws java.io.IOException {
11207          return PARSER.parseDelimitedFrom(input, extensionRegistry);
11208        }
11209        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto parseFrom(
11210            com.google.protobuf.CodedInputStream input)
11211            throws java.io.IOException {
11212          return PARSER.parseFrom(input);
11213        }
11214        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto parseFrom(
11215            com.google.protobuf.CodedInputStream input,
11216            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
11217            throws java.io.IOException {
11218          return PARSER.parseFrom(input, extensionRegistry);
11219        }
11220    
11221        public static Builder newBuilder() { return Builder.create(); }
11222        public Builder newBuilderForType() { return newBuilder(); }
11223        public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto prototype) {
11224          return newBuilder().mergeFrom(prototype);
11225        }
11226        public Builder toBuilder() { return newBuilder(this); }
11227    
11228        @java.lang.Override
11229        protected Builder newBuilderForType(
11230            com.google.protobuf.GeneratedMessage.BuilderParent parent) {
11231          Builder builder = new Builder(parent);
11232          return builder;
11233        }
11234        /**
11235         * Protobuf type {@code hadoop.hdfs.FormatResponseProto}
11236         */
11237        public static final class Builder extends
11238            com.google.protobuf.GeneratedMessage.Builder<Builder>
11239           implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProtoOrBuilder {
11240          public static final com.google.protobuf.Descriptors.Descriptor
11241              getDescriptor() {
11242            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_FormatResponseProto_descriptor;
11243          }
11244    
11245          protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
11246              internalGetFieldAccessorTable() {
11247            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_FormatResponseProto_fieldAccessorTable
11248                .ensureFieldAccessorsInitialized(
11249                    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto.Builder.class);
11250          }
11251    
11252          // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto.newBuilder()
11253          private Builder() {
11254            maybeForceBuilderInitialization();
11255          }
11256    
11257          private Builder(
11258              com.google.protobuf.GeneratedMessage.BuilderParent parent) {
11259            super(parent);
11260            maybeForceBuilderInitialization();
11261          }
11262          private void maybeForceBuilderInitialization() {
11263            if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
11264            }
11265          }
11266          private static Builder create() {
11267            return new Builder();
11268          }
11269    
11270          public Builder clear() {
11271            super.clear();
11272            return this;
11273          }
11274    
11275          public Builder clone() {
11276            return create().mergeFrom(buildPartial());
11277          }
11278    
11279          public com.google.protobuf.Descriptors.Descriptor
11280              getDescriptorForType() {
11281            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_FormatResponseProto_descriptor;
11282          }
11283    
11284          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto getDefaultInstanceForType() {
11285            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto.getDefaultInstance();
11286          }
11287    
11288          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto build() {
11289            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto result = buildPartial();
11290            if (!result.isInitialized()) {
11291              throw newUninitializedMessageException(result);
11292            }
11293            return result;
11294          }
11295    
11296          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto buildPartial() {
11297            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto(this);
11298            onBuilt();
11299            return result;
11300          }
11301    
11302          public Builder mergeFrom(com.google.protobuf.Message other) {
11303            if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto) {
11304              return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto)other);
11305            } else {
11306              super.mergeFrom(other);
11307              return this;
11308            }
11309          }
11310    
11311          public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto other) {
11312            if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto.getDefaultInstance()) return this;
11313            this.mergeUnknownFields(other.getUnknownFields());
11314            return this;
11315          }
11316    
11317          public final boolean isInitialized() {
11318            return true;
11319          }
11320    
11321          public Builder mergeFrom(
11322              com.google.protobuf.CodedInputStream input,
11323              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
11324              throws java.io.IOException {
11325            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto parsedMessage = null;
11326            try {
11327              parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
11328            } catch (com.google.protobuf.InvalidProtocolBufferException e) {
11329              parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto) e.getUnfinishedMessage();
11330              throw e;
11331            } finally {
11332              if (parsedMessage != null) {
11333                mergeFrom(parsedMessage);
11334              }
11335            }
11336            return this;
11337          }
11338    
11339          // @@protoc_insertion_point(builder_scope:hadoop.hdfs.FormatResponseProto)
11340        }
11341    
11342        static {
11343          defaultInstance = new FormatResponseProto(true);
11344          defaultInstance.initFields();
11345        }
11346    
11347        // @@protoc_insertion_point(class_scope:hadoop.hdfs.FormatResponseProto)
11348      }
11349    
11350      public interface NewEpochRequestProtoOrBuilder
11351          extends com.google.protobuf.MessageOrBuilder {
11352    
11353        // required .hadoop.hdfs.JournalIdProto jid = 1;
11354        /**
11355         * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
11356         */
11357        boolean hasJid();
11358        /**
11359         * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
11360         */
11361        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid();
11362        /**
11363         * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
11364         */
11365        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder();
11366    
11367        // required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;
11368        /**
11369         * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
11370         */
11371        boolean hasNsInfo();
11372        /**
11373         * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
11374         */
11375        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto getNsInfo();
11376        /**
11377         * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
11378         */
11379        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProtoOrBuilder getNsInfoOrBuilder();
11380    
11381        // required uint64 epoch = 3;
11382        /**
11383         * <code>required uint64 epoch = 3;</code>
11384         */
11385        boolean hasEpoch();
11386        /**
11387         * <code>required uint64 epoch = 3;</code>
11388         */
11389        long getEpoch();
11390      }
11391      /**
11392       * Protobuf type {@code hadoop.hdfs.NewEpochRequestProto}
11393       *
11394       * <pre>
11395       **
11396       * newEpoch()
11397       * </pre>
11398       */
11399      public static final class NewEpochRequestProto extends
11400          com.google.protobuf.GeneratedMessage
11401          implements NewEpochRequestProtoOrBuilder {
11402        // Use NewEpochRequestProto.newBuilder() to construct.
11403        private NewEpochRequestProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
11404          super(builder);
11405          this.unknownFields = builder.getUnknownFields();
11406        }
11407        private NewEpochRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
11408    
11409        private static final NewEpochRequestProto defaultInstance;
11410        public static NewEpochRequestProto getDefaultInstance() {
11411          return defaultInstance;
11412        }
11413    
11414        public NewEpochRequestProto getDefaultInstanceForType() {
11415          return defaultInstance;
11416        }
11417    
11418        private final com.google.protobuf.UnknownFieldSet unknownFields;
11419        @java.lang.Override
11420        public final com.google.protobuf.UnknownFieldSet
11421            getUnknownFields() {
11422          return this.unknownFields;
11423        }
11424        private NewEpochRequestProto(
11425            com.google.protobuf.CodedInputStream input,
11426            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
11427            throws com.google.protobuf.InvalidProtocolBufferException {
11428          initFields();
11429          int mutable_bitField0_ = 0;
11430          com.google.protobuf.UnknownFieldSet.Builder unknownFields =
11431              com.google.protobuf.UnknownFieldSet.newBuilder();
11432          try {
11433            boolean done = false;
11434            while (!done) {
11435              int tag = input.readTag();
11436              switch (tag) {
11437                case 0:
11438                  done = true;
11439                  break;
11440                default: {
11441                  if (!parseUnknownField(input, unknownFields,
11442                                         extensionRegistry, tag)) {
11443                    done = true;
11444                  }
11445                  break;
11446                }
11447                case 10: {
11448                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder subBuilder = null;
11449                  if (((bitField0_ & 0x00000001) == 0x00000001)) {
11450                    subBuilder = jid_.toBuilder();
11451                  }
11452                  jid_ = input.readMessage(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.PARSER, extensionRegistry);
11453                  if (subBuilder != null) {
11454                    subBuilder.mergeFrom(jid_);
11455                    jid_ = subBuilder.buildPartial();
11456                  }
11457                  bitField0_ |= 0x00000001;
11458                  break;
11459                }
11460                case 18: {
11461                  org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.Builder subBuilder = null;
11462                  if (((bitField0_ & 0x00000002) == 0x00000002)) {
11463                    subBuilder = nsInfo_.toBuilder();
11464                  }
11465                  nsInfo_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.PARSER, extensionRegistry);
11466                  if (subBuilder != null) {
11467                    subBuilder.mergeFrom(nsInfo_);
11468                    nsInfo_ = subBuilder.buildPartial();
11469                  }
11470                  bitField0_ |= 0x00000002;
11471                  break;
11472                }
11473                case 24: {
11474                  bitField0_ |= 0x00000004;
11475                  epoch_ = input.readUInt64();
11476                  break;
11477                }
11478              }
11479            }
11480          } catch (com.google.protobuf.InvalidProtocolBufferException e) {
11481            throw e.setUnfinishedMessage(this);
11482          } catch (java.io.IOException e) {
11483            throw new com.google.protobuf.InvalidProtocolBufferException(
11484                e.getMessage()).setUnfinishedMessage(this);
11485          } finally {
11486            this.unknownFields = unknownFields.build();
11487            makeExtensionsImmutable();
11488          }
11489        }
11490        public static final com.google.protobuf.Descriptors.Descriptor
11491            getDescriptor() {
11492          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_NewEpochRequestProto_descriptor;
11493        }
11494    
11495        protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
11496            internalGetFieldAccessorTable() {
11497          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_NewEpochRequestProto_fieldAccessorTable
11498              .ensureFieldAccessorsInitialized(
11499                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto.Builder.class);
11500        }
11501    
11502        public static com.google.protobuf.Parser<NewEpochRequestProto> PARSER =
11503            new com.google.protobuf.AbstractParser<NewEpochRequestProto>() {
11504          public NewEpochRequestProto parsePartialFrom(
11505              com.google.protobuf.CodedInputStream input,
11506              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
11507              throws com.google.protobuf.InvalidProtocolBufferException {
11508            return new NewEpochRequestProto(input, extensionRegistry);
11509          }
11510        };
11511    
11512        @java.lang.Override
11513        public com.google.protobuf.Parser<NewEpochRequestProto> getParserForType() {
11514          return PARSER;
11515        }
11516    
11517        private int bitField0_;
11518        // required .hadoop.hdfs.JournalIdProto jid = 1;
11519        public static final int JID_FIELD_NUMBER = 1;
11520        private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto jid_;
11521        /**
11522         * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
11523         */
11524        public boolean hasJid() {
11525          return ((bitField0_ & 0x00000001) == 0x00000001);
11526        }
11527        /**
11528         * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
11529         */
11530        public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid() {
11531          return jid_;
11532        }
11533        /**
11534         * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
11535         */
11536        public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder() {
11537          return jid_;
11538        }
11539    
11540        // required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;
11541        public static final int NSINFO_FIELD_NUMBER = 2;
11542        private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto nsInfo_;
11543        /**
11544         * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
11545         */
11546        public boolean hasNsInfo() {
11547          return ((bitField0_ & 0x00000002) == 0x00000002);
11548        }
11549        /**
11550         * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
11551         */
11552        public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto getNsInfo() {
11553          return nsInfo_;
11554        }
11555        /**
11556         * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
11557         */
11558        public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProtoOrBuilder getNsInfoOrBuilder() {
11559          return nsInfo_;
11560        }
11561    
11562        // required uint64 epoch = 3;
11563        public static final int EPOCH_FIELD_NUMBER = 3;
11564        private long epoch_;
11565        /**
11566         * <code>required uint64 epoch = 3;</code>
11567         */
11568        public boolean hasEpoch() {
11569          return ((bitField0_ & 0x00000004) == 0x00000004);
11570        }
11571        /**
11572         * <code>required uint64 epoch = 3;</code>
11573         */
11574        public long getEpoch() {
11575          return epoch_;
11576        }
11577    
11578        private void initFields() {
11579          jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
11580          nsInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.getDefaultInstance();
11581          epoch_ = 0L;
11582        }
11583        private byte memoizedIsInitialized = -1;
11584        public final boolean isInitialized() {
11585          byte isInitialized = memoizedIsInitialized;
11586          if (isInitialized != -1) return isInitialized == 1;
11587    
11588          if (!hasJid()) {
11589            memoizedIsInitialized = 0;
11590            return false;
11591          }
11592          if (!hasNsInfo()) {
11593            memoizedIsInitialized = 0;
11594            return false;
11595          }
11596          if (!hasEpoch()) {
11597            memoizedIsInitialized = 0;
11598            return false;
11599          }
11600          if (!getJid().isInitialized()) {
11601            memoizedIsInitialized = 0;
11602            return false;
11603          }
11604          if (!getNsInfo().isInitialized()) {
11605            memoizedIsInitialized = 0;
11606            return false;
11607          }
11608          memoizedIsInitialized = 1;
11609          return true;
11610        }
11611    
11612        public void writeTo(com.google.protobuf.CodedOutputStream output)
11613                            throws java.io.IOException {
11614          getSerializedSize();
11615          if (((bitField0_ & 0x00000001) == 0x00000001)) {
11616            output.writeMessage(1, jid_);
11617          }
11618          if (((bitField0_ & 0x00000002) == 0x00000002)) {
11619            output.writeMessage(2, nsInfo_);
11620          }
11621          if (((bitField0_ & 0x00000004) == 0x00000004)) {
11622            output.writeUInt64(3, epoch_);
11623          }
11624          getUnknownFields().writeTo(output);
11625        }
11626    
11627        private int memoizedSerializedSize = -1;
11628        public int getSerializedSize() {
11629          int size = memoizedSerializedSize;
11630          if (size != -1) return size;
11631    
11632          size = 0;
11633          if (((bitField0_ & 0x00000001) == 0x00000001)) {
11634            size += com.google.protobuf.CodedOutputStream
11635              .computeMessageSize(1, jid_);
11636          }
11637          if (((bitField0_ & 0x00000002) == 0x00000002)) {
11638            size += com.google.protobuf.CodedOutputStream
11639              .computeMessageSize(2, nsInfo_);
11640          }
11641          if (((bitField0_ & 0x00000004) == 0x00000004)) {
11642            size += com.google.protobuf.CodedOutputStream
11643              .computeUInt64Size(3, epoch_);
11644          }
11645          size += getUnknownFields().getSerializedSize();
11646          memoizedSerializedSize = size;
11647          return size;
11648        }
11649    
11650        private static final long serialVersionUID = 0L;
11651        @java.lang.Override
11652        protected java.lang.Object writeReplace()
11653            throws java.io.ObjectStreamException {
11654          return super.writeReplace();
11655        }
11656    
11657        @java.lang.Override
11658        public boolean equals(final java.lang.Object obj) {
11659          if (obj == this) {
11660           return true;
11661          }
11662          if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto)) {
11663            return super.equals(obj);
11664          }
11665          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto) obj;
11666    
11667          boolean result = true;
11668          result = result && (hasJid() == other.hasJid());
11669          if (hasJid()) {
11670            result = result && getJid()
11671                .equals(other.getJid());
11672          }
11673          result = result && (hasNsInfo() == other.hasNsInfo());
11674          if (hasNsInfo()) {
11675            result = result && getNsInfo()
11676                .equals(other.getNsInfo());
11677          }
11678          result = result && (hasEpoch() == other.hasEpoch());
11679          if (hasEpoch()) {
11680            result = result && (getEpoch()
11681                == other.getEpoch());
11682          }
11683          result = result &&
11684              getUnknownFields().equals(other.getUnknownFields());
11685          return result;
11686        }
11687    
11688        private int memoizedHashCode = 0;
11689        @java.lang.Override
11690        public int hashCode() {
11691          if (memoizedHashCode != 0) {
11692            return memoizedHashCode;
11693          }
11694          int hash = 41;
11695          hash = (19 * hash) + getDescriptorForType().hashCode();
11696          if (hasJid()) {
11697            hash = (37 * hash) + JID_FIELD_NUMBER;
11698            hash = (53 * hash) + getJid().hashCode();
11699          }
11700          if (hasNsInfo()) {
11701            hash = (37 * hash) + NSINFO_FIELD_NUMBER;
11702            hash = (53 * hash) + getNsInfo().hashCode();
11703          }
11704          if (hasEpoch()) {
11705            hash = (37 * hash) + EPOCH_FIELD_NUMBER;
11706            hash = (53 * hash) + hashLong(getEpoch());
11707          }
11708          hash = (29 * hash) + getUnknownFields().hashCode();
11709          memoizedHashCode = hash;
11710          return hash;
11711        }
11712    
11713        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto parseFrom(
11714            com.google.protobuf.ByteString data)
11715            throws com.google.protobuf.InvalidProtocolBufferException {
11716          return PARSER.parseFrom(data);
11717        }
11718        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto parseFrom(
11719            com.google.protobuf.ByteString data,
11720            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
11721            throws com.google.protobuf.InvalidProtocolBufferException {
11722          return PARSER.parseFrom(data, extensionRegistry);
11723        }
11724        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto parseFrom(byte[] data)
11725            throws com.google.protobuf.InvalidProtocolBufferException {
11726          return PARSER.parseFrom(data);
11727        }
11728        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto parseFrom(
11729            byte[] data,
11730            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
11731            throws com.google.protobuf.InvalidProtocolBufferException {
11732          return PARSER.parseFrom(data, extensionRegistry);
11733        }
11734        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto parseFrom(java.io.InputStream input)
11735            throws java.io.IOException {
11736          return PARSER.parseFrom(input);
11737        }
11738        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto parseFrom(
11739            java.io.InputStream input,
11740            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
11741            throws java.io.IOException {
11742          return PARSER.parseFrom(input, extensionRegistry);
11743        }
11744        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto parseDelimitedFrom(java.io.InputStream input)
11745            throws java.io.IOException {
11746          return PARSER.parseDelimitedFrom(input);
11747        }
11748        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto parseDelimitedFrom(
11749            java.io.InputStream input,
11750            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
11751            throws java.io.IOException {
11752          return PARSER.parseDelimitedFrom(input, extensionRegistry);
11753        }
11754        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto parseFrom(
11755            com.google.protobuf.CodedInputStream input)
11756            throws java.io.IOException {
11757          return PARSER.parseFrom(input);
11758        }
11759        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto parseFrom(
11760            com.google.protobuf.CodedInputStream input,
11761            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
11762            throws java.io.IOException {
11763          return PARSER.parseFrom(input, extensionRegistry);
11764        }
11765    
11766        public static Builder newBuilder() { return Builder.create(); }
11767        public Builder newBuilderForType() { return newBuilder(); }
11768        public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto prototype) {
11769          return newBuilder().mergeFrom(prototype);
11770        }
11771        public Builder toBuilder() { return newBuilder(this); }
11772    
11773        @java.lang.Override
11774        protected Builder newBuilderForType(
11775            com.google.protobuf.GeneratedMessage.BuilderParent parent) {
11776          Builder builder = new Builder(parent);
11777          return builder;
11778        }
11779        /**
11780         * Protobuf type {@code hadoop.hdfs.NewEpochRequestProto}
11781         *
11782         * <pre>
11783         **
11784         * newEpoch()
11785         * </pre>
11786         */
11787        public static final class Builder extends
11788            com.google.protobuf.GeneratedMessage.Builder<Builder>
11789           implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProtoOrBuilder {
11790          public static final com.google.protobuf.Descriptors.Descriptor
11791              getDescriptor() {
11792            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_NewEpochRequestProto_descriptor;
11793          }
11794    
11795          protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
11796              internalGetFieldAccessorTable() {
11797            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_NewEpochRequestProto_fieldAccessorTable
11798                .ensureFieldAccessorsInitialized(
11799                    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto.Builder.class);
11800          }
11801    
11802          // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto.newBuilder()
11803          private Builder() {
11804            maybeForceBuilderInitialization();
11805          }
11806    
11807          private Builder(
11808              com.google.protobuf.GeneratedMessage.BuilderParent parent) {
11809            super(parent);
11810            maybeForceBuilderInitialization();
11811          }
11812          private void maybeForceBuilderInitialization() {
11813            if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
11814              getJidFieldBuilder();
11815              getNsInfoFieldBuilder();
11816            }
11817          }
11818          private static Builder create() {
11819            return new Builder();
11820          }
11821    
11822          public Builder clear() {
11823            super.clear();
11824            if (jidBuilder_ == null) {
11825              jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
11826            } else {
11827              jidBuilder_.clear();
11828            }
11829            bitField0_ = (bitField0_ & ~0x00000001);
11830            if (nsInfoBuilder_ == null) {
11831              nsInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.getDefaultInstance();
11832            } else {
11833              nsInfoBuilder_.clear();
11834            }
11835            bitField0_ = (bitField0_ & ~0x00000002);
11836            epoch_ = 0L;
11837            bitField0_ = (bitField0_ & ~0x00000004);
11838            return this;
11839          }
11840    
11841          public Builder clone() {
11842            return create().mergeFrom(buildPartial());
11843          }
11844    
11845          public com.google.protobuf.Descriptors.Descriptor
11846              getDescriptorForType() {
11847            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_NewEpochRequestProto_descriptor;
11848          }
11849    
11850          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto getDefaultInstanceForType() {
11851            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto.getDefaultInstance();
11852          }
11853    
11854          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto build() {
11855            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto result = buildPartial();
11856            if (!result.isInitialized()) {
11857              throw newUninitializedMessageException(result);
11858            }
11859            return result;
11860          }
11861    
11862          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto buildPartial() {
11863            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto(this);
11864            int from_bitField0_ = bitField0_;
11865            int to_bitField0_ = 0;
11866            if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
11867              to_bitField0_ |= 0x00000001;
11868            }
11869            if (jidBuilder_ == null) {
11870              result.jid_ = jid_;
11871            } else {
11872              result.jid_ = jidBuilder_.build();
11873            }
11874            if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
11875              to_bitField0_ |= 0x00000002;
11876            }
11877            if (nsInfoBuilder_ == null) {
11878              result.nsInfo_ = nsInfo_;
11879            } else {
11880              result.nsInfo_ = nsInfoBuilder_.build();
11881            }
11882            if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
11883              to_bitField0_ |= 0x00000004;
11884            }
11885            result.epoch_ = epoch_;
11886            result.bitField0_ = to_bitField0_;
11887            onBuilt();
11888            return result;
11889          }
11890    
11891          public Builder mergeFrom(com.google.protobuf.Message other) {
11892            if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto) {
11893              return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto)other);
11894            } else {
11895              super.mergeFrom(other);
11896              return this;
11897            }
11898          }
11899    
11900          public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto other) {
11901            if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto.getDefaultInstance()) return this;
11902            if (other.hasJid()) {
11903              mergeJid(other.getJid());
11904            }
11905            if (other.hasNsInfo()) {
11906              mergeNsInfo(other.getNsInfo());
11907            }
11908            if (other.hasEpoch()) {
11909              setEpoch(other.getEpoch());
11910            }
11911            this.mergeUnknownFields(other.getUnknownFields());
11912            return this;
11913          }
11914    
11915          public final boolean isInitialized() {
11916            if (!hasJid()) {
11917              
11918              return false;
11919            }
11920            if (!hasNsInfo()) {
11921              
11922              return false;
11923            }
11924            if (!hasEpoch()) {
11925              
11926              return false;
11927            }
11928            if (!getJid().isInitialized()) {
11929              
11930              return false;
11931            }
11932            if (!getNsInfo().isInitialized()) {
11933              
11934              return false;
11935            }
11936            return true;
11937          }
11938    
11939          public Builder mergeFrom(
11940              com.google.protobuf.CodedInputStream input,
11941              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
11942              throws java.io.IOException {
11943            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto parsedMessage = null;
11944            try {
11945              parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
11946            } catch (com.google.protobuf.InvalidProtocolBufferException e) {
11947              parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto) e.getUnfinishedMessage();
11948              throw e;
11949            } finally {
11950              if (parsedMessage != null) {
11951                mergeFrom(parsedMessage);
11952              }
11953            }
11954            return this;
11955          }
11956          private int bitField0_;
11957    
11958          // required .hadoop.hdfs.JournalIdProto jid = 1;
11959          private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
11960          private com.google.protobuf.SingleFieldBuilder<
11961              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder> jidBuilder_;
11962          /**
11963           * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
11964           */
11965          public boolean hasJid() {
11966            return ((bitField0_ & 0x00000001) == 0x00000001);
11967          }
11968          /**
11969           * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
11970           */
11971          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid() {
11972            if (jidBuilder_ == null) {
11973              return jid_;
11974            } else {
11975              return jidBuilder_.getMessage();
11976            }
11977          }
11978          /**
11979           * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
11980           */
11981          public Builder setJid(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto value) {
11982            if (jidBuilder_ == null) {
11983              if (value == null) {
11984                throw new NullPointerException();
11985              }
11986              jid_ = value;
11987              onChanged();
11988            } else {
11989              jidBuilder_.setMessage(value);
11990            }
11991            bitField0_ |= 0x00000001;
11992            return this;
11993          }
11994          /**
11995           * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
11996           */
11997          public Builder setJid(
11998              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder builderForValue) {
11999            if (jidBuilder_ == null) {
12000              jid_ = builderForValue.build();
12001              onChanged();
12002            } else {
12003              jidBuilder_.setMessage(builderForValue.build());
12004            }
12005            bitField0_ |= 0x00000001;
12006            return this;
12007          }
12008          /**
12009           * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
12010           */
12011          public Builder mergeJid(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto value) {
12012            if (jidBuilder_ == null) {
12013              if (((bitField0_ & 0x00000001) == 0x00000001) &&
12014                  jid_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance()) {
12015                jid_ =
12016                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.newBuilder(jid_).mergeFrom(value).buildPartial();
12017              } else {
12018                jid_ = value;
12019              }
12020              onChanged();
12021            } else {
12022              jidBuilder_.mergeFrom(value);
12023            }
12024            bitField0_ |= 0x00000001;
12025            return this;
12026          }
12027          /**
12028           * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
12029           */
12030          public Builder clearJid() {
12031            if (jidBuilder_ == null) {
12032              jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
12033              onChanged();
12034            } else {
12035              jidBuilder_.clear();
12036            }
12037            bitField0_ = (bitField0_ & ~0x00000001);
12038            return this;
12039          }
12040          /**
12041           * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
12042           */
12043          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder getJidBuilder() {
12044            bitField0_ |= 0x00000001;
12045            onChanged();
12046            return getJidFieldBuilder().getBuilder();
12047          }
12048          /**
12049           * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
12050           */
12051          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder() {
12052            if (jidBuilder_ != null) {
12053              return jidBuilder_.getMessageOrBuilder();
12054            } else {
12055              return jid_;
12056            }
12057          }
12058          /**
12059           * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
12060           */
12061          private com.google.protobuf.SingleFieldBuilder<
12062              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder> 
12063              getJidFieldBuilder() {
12064            if (jidBuilder_ == null) {
12065              jidBuilder_ = new com.google.protobuf.SingleFieldBuilder<
12066                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder>(
12067                      jid_,
12068                      getParentForChildren(),
12069                      isClean());
12070              jid_ = null;
12071            }
12072            return jidBuilder_;
12073          }
12074    
12075          // required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;
12076          private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto nsInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.getDefaultInstance();
12077          private com.google.protobuf.SingleFieldBuilder<
12078              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProtoOrBuilder> nsInfoBuilder_;
12079          /**
12080           * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
12081           */
12082          public boolean hasNsInfo() {
12083            return ((bitField0_ & 0x00000002) == 0x00000002);
12084          }
12085          /**
12086           * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
12087           */
12088          public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto getNsInfo() {
12089            if (nsInfoBuilder_ == null) {
12090              return nsInfo_;
12091            } else {
12092              return nsInfoBuilder_.getMessage();
12093            }
12094          }
12095          /**
12096           * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
12097           */
12098          public Builder setNsInfo(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto value) {
12099            if (nsInfoBuilder_ == null) {
12100              if (value == null) {
12101                throw new NullPointerException();
12102              }
12103              nsInfo_ = value;
12104              onChanged();
12105            } else {
12106              nsInfoBuilder_.setMessage(value);
12107            }
12108            bitField0_ |= 0x00000002;
12109            return this;
12110          }
12111          /**
12112           * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
12113           */
12114          public Builder setNsInfo(
12115              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.Builder builderForValue) {
12116            if (nsInfoBuilder_ == null) {
12117              nsInfo_ = builderForValue.build();
12118              onChanged();
12119            } else {
12120              nsInfoBuilder_.setMessage(builderForValue.build());
12121            }
12122            bitField0_ |= 0x00000002;
12123            return this;
12124          }
12125          /**
12126           * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
12127           */
12128          public Builder mergeNsInfo(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto value) {
12129            if (nsInfoBuilder_ == null) {
12130              if (((bitField0_ & 0x00000002) == 0x00000002) &&
12131                  nsInfo_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.getDefaultInstance()) {
12132                nsInfo_ =
12133                  org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.newBuilder(nsInfo_).mergeFrom(value).buildPartial();
12134              } else {
12135                nsInfo_ = value;
12136              }
12137              onChanged();
12138            } else {
12139              nsInfoBuilder_.mergeFrom(value);
12140            }
12141            bitField0_ |= 0x00000002;
12142            return this;
12143          }
12144          /**
12145           * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
12146           */
12147          public Builder clearNsInfo() {
12148            if (nsInfoBuilder_ == null) {
12149              nsInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.getDefaultInstance();
12150              onChanged();
12151            } else {
12152              nsInfoBuilder_.clear();
12153            }
12154            bitField0_ = (bitField0_ & ~0x00000002);
12155            return this;
12156          }
12157          /**
12158           * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
12159           */
12160          public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.Builder getNsInfoBuilder() {
12161            bitField0_ |= 0x00000002;
12162            onChanged();
12163            return getNsInfoFieldBuilder().getBuilder();
12164          }
12165          /**
12166           * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
12167           */
12168          public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProtoOrBuilder getNsInfoOrBuilder() {
12169            if (nsInfoBuilder_ != null) {
12170              return nsInfoBuilder_.getMessageOrBuilder();
12171            } else {
12172              return nsInfo_;
12173            }
12174          }
12175          /**
12176           * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
12177           */
12178          private com.google.protobuf.SingleFieldBuilder<
12179              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProtoOrBuilder> 
12180              getNsInfoFieldBuilder() {
12181            if (nsInfoBuilder_ == null) {
12182              nsInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder<
12183                  org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProtoOrBuilder>(
12184                      nsInfo_,
12185                      getParentForChildren(),
12186                      isClean());
12187              nsInfo_ = null;
12188            }
12189            return nsInfoBuilder_;
12190          }
12191    
12192          // required uint64 epoch = 3;
12193          private long epoch_ ;
12194          /**
12195           * <code>required uint64 epoch = 3;</code>
12196           */
12197          public boolean hasEpoch() {
12198            return ((bitField0_ & 0x00000004) == 0x00000004);
12199          }
12200          /**
12201           * <code>required uint64 epoch = 3;</code>
12202           */
12203          public long getEpoch() {
12204            return epoch_;
12205          }
12206          /**
12207           * <code>required uint64 epoch = 3;</code>
12208           */
12209          public Builder setEpoch(long value) {
12210            bitField0_ |= 0x00000004;
12211            epoch_ = value;
12212            onChanged();
12213            return this;
12214          }
12215          /**
12216           * <code>required uint64 epoch = 3;</code>
12217           */
12218          public Builder clearEpoch() {
12219            bitField0_ = (bitField0_ & ~0x00000004);
12220            epoch_ = 0L;
12221            onChanged();
12222            return this;
12223          }
12224    
12225          // @@protoc_insertion_point(builder_scope:hadoop.hdfs.NewEpochRequestProto)
12226        }
12227    
12228        static {
12229          defaultInstance = new NewEpochRequestProto(true);
12230          defaultInstance.initFields();
12231        }
12232    
12233        // @@protoc_insertion_point(class_scope:hadoop.hdfs.NewEpochRequestProto)
12234      }
12235    
12236      public interface NewEpochResponseProtoOrBuilder
12237          extends com.google.protobuf.MessageOrBuilder {
12238    
12239        // optional uint64 lastSegmentTxId = 1;
12240        /**
12241         * <code>optional uint64 lastSegmentTxId = 1;</code>
12242         */
12243        boolean hasLastSegmentTxId();
12244        /**
12245         * <code>optional uint64 lastSegmentTxId = 1;</code>
12246         */
12247        long getLastSegmentTxId();
12248      }
12249      /**
12250       * Protobuf type {@code hadoop.hdfs.NewEpochResponseProto}
12251       */
12252      public static final class NewEpochResponseProto extends
12253          com.google.protobuf.GeneratedMessage
12254          implements NewEpochResponseProtoOrBuilder {
12255        // Use NewEpochResponseProto.newBuilder() to construct.
12256        private NewEpochResponseProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
12257          super(builder);
12258          this.unknownFields = builder.getUnknownFields();
12259        }
12260        private NewEpochResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
12261    
12262        private static final NewEpochResponseProto defaultInstance;
12263        public static NewEpochResponseProto getDefaultInstance() {
12264          return defaultInstance;
12265        }
12266    
12267        public NewEpochResponseProto getDefaultInstanceForType() {
12268          return defaultInstance;
12269        }
12270    
12271        private final com.google.protobuf.UnknownFieldSet unknownFields;
12272        @java.lang.Override
12273        public final com.google.protobuf.UnknownFieldSet
12274            getUnknownFields() {
12275          return this.unknownFields;
12276        }
12277        private NewEpochResponseProto(
12278            com.google.protobuf.CodedInputStream input,
12279            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
12280            throws com.google.protobuf.InvalidProtocolBufferException {
12281          initFields();
12282          int mutable_bitField0_ = 0;
12283          com.google.protobuf.UnknownFieldSet.Builder unknownFields =
12284              com.google.protobuf.UnknownFieldSet.newBuilder();
12285          try {
12286            boolean done = false;
12287            while (!done) {
12288              int tag = input.readTag();
12289              switch (tag) {
12290                case 0:
12291                  done = true;
12292                  break;
12293                default: {
12294                  if (!parseUnknownField(input, unknownFields,
12295                                         extensionRegistry, tag)) {
12296                    done = true;
12297                  }
12298                  break;
12299                }
12300                case 8: {
12301                  bitField0_ |= 0x00000001;
12302                  lastSegmentTxId_ = input.readUInt64();
12303                  break;
12304                }
12305              }
12306            }
12307          } catch (com.google.protobuf.InvalidProtocolBufferException e) {
12308            throw e.setUnfinishedMessage(this);
12309          } catch (java.io.IOException e) {
12310            throw new com.google.protobuf.InvalidProtocolBufferException(
12311                e.getMessage()).setUnfinishedMessage(this);
12312          } finally {
12313            this.unknownFields = unknownFields.build();
12314            makeExtensionsImmutable();
12315          }
12316        }
12317        public static final com.google.protobuf.Descriptors.Descriptor
12318            getDescriptor() {
12319          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_NewEpochResponseProto_descriptor;
12320        }
12321    
12322        protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
12323            internalGetFieldAccessorTable() {
12324          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_NewEpochResponseProto_fieldAccessorTable
12325              .ensureFieldAccessorsInitialized(
12326                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto.Builder.class);
12327        }
12328    
12329        public static com.google.protobuf.Parser<NewEpochResponseProto> PARSER =
12330            new com.google.protobuf.AbstractParser<NewEpochResponseProto>() {
12331          public NewEpochResponseProto parsePartialFrom(
12332              com.google.protobuf.CodedInputStream input,
12333              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
12334              throws com.google.protobuf.InvalidProtocolBufferException {
12335            return new NewEpochResponseProto(input, extensionRegistry);
12336          }
12337        };
12338    
12339        @java.lang.Override
12340        public com.google.protobuf.Parser<NewEpochResponseProto> getParserForType() {
12341          return PARSER;
12342        }
12343    
12344        private int bitField0_;
12345        // optional uint64 lastSegmentTxId = 1;
12346        public static final int LASTSEGMENTTXID_FIELD_NUMBER = 1;
12347        private long lastSegmentTxId_;
12348        /**
12349         * <code>optional uint64 lastSegmentTxId = 1;</code>
12350         */
12351        public boolean hasLastSegmentTxId() {
12352          return ((bitField0_ & 0x00000001) == 0x00000001);
12353        }
12354        /**
12355         * <code>optional uint64 lastSegmentTxId = 1;</code>
12356         */
12357        public long getLastSegmentTxId() {
12358          return lastSegmentTxId_;
12359        }
12360    
12361        private void initFields() {
12362          lastSegmentTxId_ = 0L;
12363        }
12364        private byte memoizedIsInitialized = -1;
12365        public final boolean isInitialized() {
12366          byte isInitialized = memoizedIsInitialized;
12367          if (isInitialized != -1) return isInitialized == 1;
12368    
12369          memoizedIsInitialized = 1;
12370          return true;
12371        }
12372    
12373        public void writeTo(com.google.protobuf.CodedOutputStream output)
12374                            throws java.io.IOException {
12375          getSerializedSize();
12376          if (((bitField0_ & 0x00000001) == 0x00000001)) {
12377            output.writeUInt64(1, lastSegmentTxId_);
12378          }
12379          getUnknownFields().writeTo(output);
12380        }
12381    
12382        private int memoizedSerializedSize = -1;
12383        public int getSerializedSize() {
12384          int size = memoizedSerializedSize;
12385          if (size != -1) return size;
12386    
12387          size = 0;
12388          if (((bitField0_ & 0x00000001) == 0x00000001)) {
12389            size += com.google.protobuf.CodedOutputStream
12390              .computeUInt64Size(1, lastSegmentTxId_);
12391          }
12392          size += getUnknownFields().getSerializedSize();
12393          memoizedSerializedSize = size;
12394          return size;
12395        }
12396    
12397        private static final long serialVersionUID = 0L;
12398        @java.lang.Override
12399        protected java.lang.Object writeReplace()
12400            throws java.io.ObjectStreamException {
12401          return super.writeReplace();
12402        }
12403    
12404        @java.lang.Override
12405        public boolean equals(final java.lang.Object obj) {
12406          if (obj == this) {
12407           return true;
12408          }
12409          if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto)) {
12410            return super.equals(obj);
12411          }
12412          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto) obj;
12413    
12414          boolean result = true;
12415          result = result && (hasLastSegmentTxId() == other.hasLastSegmentTxId());
12416          if (hasLastSegmentTxId()) {
12417            result = result && (getLastSegmentTxId()
12418                == other.getLastSegmentTxId());
12419          }
12420          result = result &&
12421              getUnknownFields().equals(other.getUnknownFields());
12422          return result;
12423        }
12424    
12425        private int memoizedHashCode = 0;
12426        @java.lang.Override
12427        public int hashCode() {
12428          if (memoizedHashCode != 0) {
12429            return memoizedHashCode;
12430          }
12431          int hash = 41;
12432          hash = (19 * hash) + getDescriptorForType().hashCode();
12433          if (hasLastSegmentTxId()) {
12434            hash = (37 * hash) + LASTSEGMENTTXID_FIELD_NUMBER;
12435            hash = (53 * hash) + hashLong(getLastSegmentTxId());
12436          }
12437          hash = (29 * hash) + getUnknownFields().hashCode();
12438          memoizedHashCode = hash;
12439          return hash;
12440        }
12441    
12442        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto parseFrom(
12443            com.google.protobuf.ByteString data)
12444            throws com.google.protobuf.InvalidProtocolBufferException {
12445          return PARSER.parseFrom(data);
12446        }
12447        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto parseFrom(
12448            com.google.protobuf.ByteString data,
12449            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
12450            throws com.google.protobuf.InvalidProtocolBufferException {
12451          return PARSER.parseFrom(data, extensionRegistry);
12452        }
12453        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto parseFrom(byte[] data)
12454            throws com.google.protobuf.InvalidProtocolBufferException {
12455          return PARSER.parseFrom(data);
12456        }
12457        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto parseFrom(
12458            byte[] data,
12459            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
12460            throws com.google.protobuf.InvalidProtocolBufferException {
12461          return PARSER.parseFrom(data, extensionRegistry);
12462        }
12463        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto parseFrom(java.io.InputStream input)
12464            throws java.io.IOException {
12465          return PARSER.parseFrom(input);
12466        }
12467        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto parseFrom(
12468            java.io.InputStream input,
12469            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
12470            throws java.io.IOException {
12471          return PARSER.parseFrom(input, extensionRegistry);
12472        }
12473        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto parseDelimitedFrom(java.io.InputStream input)
12474            throws java.io.IOException {
12475          return PARSER.parseDelimitedFrom(input);
12476        }
12477        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto parseDelimitedFrom(
12478            java.io.InputStream input,
12479            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
12480            throws java.io.IOException {
12481          return PARSER.parseDelimitedFrom(input, extensionRegistry);
12482        }
12483        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto parseFrom(
12484            com.google.protobuf.CodedInputStream input)
12485            throws java.io.IOException {
12486          return PARSER.parseFrom(input);
12487        }
12488        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto parseFrom(
12489            com.google.protobuf.CodedInputStream input,
12490            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
12491            throws java.io.IOException {
12492          return PARSER.parseFrom(input, extensionRegistry);
12493        }
12494    
12495        public static Builder newBuilder() { return Builder.create(); }
12496        public Builder newBuilderForType() { return newBuilder(); }
12497        public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto prototype) {
12498          return newBuilder().mergeFrom(prototype);
12499        }
12500        public Builder toBuilder() { return newBuilder(this); }
12501    
12502        @java.lang.Override
12503        protected Builder newBuilderForType(
12504            com.google.protobuf.GeneratedMessage.BuilderParent parent) {
12505          Builder builder = new Builder(parent);
12506          return builder;
12507        }
12508        /**
12509         * Protobuf type {@code hadoop.hdfs.NewEpochResponseProto}
12510         */
12511        public static final class Builder extends
12512            com.google.protobuf.GeneratedMessage.Builder<Builder>
12513           implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProtoOrBuilder {
12514          public static final com.google.protobuf.Descriptors.Descriptor
12515              getDescriptor() {
12516            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_NewEpochResponseProto_descriptor;
12517          }
12518    
12519          protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
12520              internalGetFieldAccessorTable() {
12521            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_NewEpochResponseProto_fieldAccessorTable
12522                .ensureFieldAccessorsInitialized(
12523                    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto.Builder.class);
12524          }
12525    
12526          // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto.newBuilder()
12527          private Builder() {
12528            maybeForceBuilderInitialization();
12529          }
12530    
12531          private Builder(
12532              com.google.protobuf.GeneratedMessage.BuilderParent parent) {
12533            super(parent);
12534            maybeForceBuilderInitialization();
12535          }
12536          private void maybeForceBuilderInitialization() {
12537            if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
12538            }
12539          }
12540          private static Builder create() {
12541            return new Builder();
12542          }
12543    
12544          public Builder clear() {
12545            super.clear();
12546            lastSegmentTxId_ = 0L;
12547            bitField0_ = (bitField0_ & ~0x00000001);
12548            return this;
12549          }
12550    
12551          public Builder clone() {
12552            return create().mergeFrom(buildPartial());
12553          }
12554    
12555          public com.google.protobuf.Descriptors.Descriptor
12556              getDescriptorForType() {
12557            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_NewEpochResponseProto_descriptor;
12558          }
12559    
12560          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto getDefaultInstanceForType() {
12561            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto.getDefaultInstance();
12562          }
12563    
12564          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto build() {
12565            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto result = buildPartial();
12566            if (!result.isInitialized()) {
12567              throw newUninitializedMessageException(result);
12568            }
12569            return result;
12570          }
12571    
12572          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto buildPartial() {
12573            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto(this);
12574            int from_bitField0_ = bitField0_;
12575            int to_bitField0_ = 0;
12576            if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
12577              to_bitField0_ |= 0x00000001;
12578            }
12579            result.lastSegmentTxId_ = lastSegmentTxId_;
12580            result.bitField0_ = to_bitField0_;
12581            onBuilt();
12582            return result;
12583          }
12584    
12585          public Builder mergeFrom(com.google.protobuf.Message other) {
12586            if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto) {
12587              return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto)other);
12588            } else {
12589              super.mergeFrom(other);
12590              return this;
12591            }
12592          }
12593    
12594          public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto other) {
12595            if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto.getDefaultInstance()) return this;
12596            if (other.hasLastSegmentTxId()) {
12597              setLastSegmentTxId(other.getLastSegmentTxId());
12598            }
12599            this.mergeUnknownFields(other.getUnknownFields());
12600            return this;
12601          }
12602    
12603          public final boolean isInitialized() {
12604            return true;
12605          }
12606    
12607          public Builder mergeFrom(
12608              com.google.protobuf.CodedInputStream input,
12609              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
12610              throws java.io.IOException {
12611            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto parsedMessage = null;
12612            try {
12613              parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
12614            } catch (com.google.protobuf.InvalidProtocolBufferException e) {
12615              parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto) e.getUnfinishedMessage();
12616              throw e;
12617            } finally {
12618              if (parsedMessage != null) {
12619                mergeFrom(parsedMessage);
12620              }
12621            }
12622            return this;
12623          }
12624          private int bitField0_;
12625    
12626          // optional uint64 lastSegmentTxId = 1;
12627          private long lastSegmentTxId_ ;
12628          /**
12629           * <code>optional uint64 lastSegmentTxId = 1;</code>
12630           */
12631          public boolean hasLastSegmentTxId() {
12632            return ((bitField0_ & 0x00000001) == 0x00000001);
12633          }
12634          /**
12635           * <code>optional uint64 lastSegmentTxId = 1;</code>
12636           */
12637          public long getLastSegmentTxId() {
12638            return lastSegmentTxId_;
12639          }
12640          /**
12641           * <code>optional uint64 lastSegmentTxId = 1;</code>
12642           */
12643          public Builder setLastSegmentTxId(long value) {
12644            bitField0_ |= 0x00000001;
12645            lastSegmentTxId_ = value;
12646            onChanged();
12647            return this;
12648          }
12649          /**
12650           * <code>optional uint64 lastSegmentTxId = 1;</code>
12651           */
12652          public Builder clearLastSegmentTxId() {
12653            bitField0_ = (bitField0_ & ~0x00000001);
12654            lastSegmentTxId_ = 0L;
12655            onChanged();
12656            return this;
12657          }
12658    
12659          // @@protoc_insertion_point(builder_scope:hadoop.hdfs.NewEpochResponseProto)
12660        }
12661    
12662        static {
12663          defaultInstance = new NewEpochResponseProto(true);
12664          defaultInstance.initFields();
12665        }
12666    
12667        // @@protoc_insertion_point(class_scope:hadoop.hdfs.NewEpochResponseProto)
12668      }
12669    
12670      public interface GetEditLogManifestRequestProtoOrBuilder
12671          extends com.google.protobuf.MessageOrBuilder {
12672    
12673        // required .hadoop.hdfs.JournalIdProto jid = 1;
12674        /**
12675         * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
12676         */
12677        boolean hasJid();
12678        /**
12679         * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
12680         */
12681        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid();
12682        /**
12683         * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
12684         */
12685        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder();
12686    
12687        // required uint64 sinceTxId = 2;
12688        /**
12689         * <code>required uint64 sinceTxId = 2;</code>
12690         *
12691         * <pre>
12692         * Transaction ID
12693         * </pre>
12694         */
12695        boolean hasSinceTxId();
12696        /**
12697         * <code>required uint64 sinceTxId = 2;</code>
12698         *
12699         * <pre>
12700         * Transaction ID
12701         * </pre>
12702         */
12703        long getSinceTxId();
12704    
12705        // optional bool forReading = 3 [default = true];
12706        /**
12707         * <code>optional bool forReading = 3 [default = true];</code>
12708         *
12709         * <pre>
12710         * Whether or not the client will be reading from the returned streams.
12711         * </pre>
12712         */
12713        boolean hasForReading();
12714        /**
12715         * <code>optional bool forReading = 3 [default = true];</code>
12716         *
12717         * <pre>
12718         * Whether or not the client will be reading from the returned streams.
12719         * </pre>
12720         */
12721        boolean getForReading();
12722      }
12723      /**
12724       * Protobuf type {@code hadoop.hdfs.GetEditLogManifestRequestProto}
12725       *
12726       * <pre>
12727       **
12728       * getEditLogManifest()
12729       * </pre>
12730       */
12731      public static final class GetEditLogManifestRequestProto extends
12732          com.google.protobuf.GeneratedMessage
12733          implements GetEditLogManifestRequestProtoOrBuilder {
12734        // Use GetEditLogManifestRequestProto.newBuilder() to construct.
12735        private GetEditLogManifestRequestProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
12736          super(builder);
12737          this.unknownFields = builder.getUnknownFields();
12738        }
12739        private GetEditLogManifestRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
12740    
12741        private static final GetEditLogManifestRequestProto defaultInstance;
12742        public static GetEditLogManifestRequestProto getDefaultInstance() {
12743          return defaultInstance;
12744        }
12745    
12746        public GetEditLogManifestRequestProto getDefaultInstanceForType() {
12747          return defaultInstance;
12748        }
12749    
12750        private final com.google.protobuf.UnknownFieldSet unknownFields;
12751        @java.lang.Override
12752        public final com.google.protobuf.UnknownFieldSet
12753            getUnknownFields() {
12754          return this.unknownFields;
12755        }
12756        private GetEditLogManifestRequestProto(
12757            com.google.protobuf.CodedInputStream input,
12758            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
12759            throws com.google.protobuf.InvalidProtocolBufferException {
12760          initFields();
12761          int mutable_bitField0_ = 0;
12762          com.google.protobuf.UnknownFieldSet.Builder unknownFields =
12763              com.google.protobuf.UnknownFieldSet.newBuilder();
12764          try {
12765            boolean done = false;
12766            while (!done) {
12767              int tag = input.readTag();
12768              switch (tag) {
12769                case 0:
12770                  done = true;
12771                  break;
12772                default: {
12773                  if (!parseUnknownField(input, unknownFields,
12774                                         extensionRegistry, tag)) {
12775                    done = true;
12776                  }
12777                  break;
12778                }
12779                case 10: {
12780                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder subBuilder = null;
12781                  if (((bitField0_ & 0x00000001) == 0x00000001)) {
12782                    subBuilder = jid_.toBuilder();
12783                  }
12784                  jid_ = input.readMessage(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.PARSER, extensionRegistry);
12785                  if (subBuilder != null) {
12786                    subBuilder.mergeFrom(jid_);
12787                    jid_ = subBuilder.buildPartial();
12788                  }
12789                  bitField0_ |= 0x00000001;
12790                  break;
12791                }
12792                case 16: {
12793                  bitField0_ |= 0x00000002;
12794                  sinceTxId_ = input.readUInt64();
12795                  break;
12796                }
12797                case 24: {
12798                  bitField0_ |= 0x00000004;
12799                  forReading_ = input.readBool();
12800                  break;
12801                }
12802              }
12803            }
12804          } catch (com.google.protobuf.InvalidProtocolBufferException e) {
12805            throw e.setUnfinishedMessage(this);
12806          } catch (java.io.IOException e) {
12807            throw new com.google.protobuf.InvalidProtocolBufferException(
12808                e.getMessage()).setUnfinishedMessage(this);
12809          } finally {
12810            this.unknownFields = unknownFields.build();
12811            makeExtensionsImmutable();
12812          }
12813        }
12814        public static final com.google.protobuf.Descriptors.Descriptor
12815            getDescriptor() {
12816          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_GetEditLogManifestRequestProto_descriptor;
12817        }
12818    
12819        protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
12820            internalGetFieldAccessorTable() {
12821          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_GetEditLogManifestRequestProto_fieldAccessorTable
12822              .ensureFieldAccessorsInitialized(
12823                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto.Builder.class);
12824        }
12825    
12826        public static com.google.protobuf.Parser<GetEditLogManifestRequestProto> PARSER =
12827            new com.google.protobuf.AbstractParser<GetEditLogManifestRequestProto>() {
12828          public GetEditLogManifestRequestProto parsePartialFrom(
12829              com.google.protobuf.CodedInputStream input,
12830              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
12831              throws com.google.protobuf.InvalidProtocolBufferException {
12832            return new GetEditLogManifestRequestProto(input, extensionRegistry);
12833          }
12834        };
12835    
12836        @java.lang.Override
12837        public com.google.protobuf.Parser<GetEditLogManifestRequestProto> getParserForType() {
12838          return PARSER;
12839        }
12840    
12841        private int bitField0_;
12842        // required .hadoop.hdfs.JournalIdProto jid = 1;
12843        public static final int JID_FIELD_NUMBER = 1;
12844        private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto jid_;
12845        /**
12846         * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
12847         */
12848        public boolean hasJid() {
12849          return ((bitField0_ & 0x00000001) == 0x00000001);
12850        }
12851        /**
12852         * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
12853         */
12854        public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid() {
12855          return jid_;
12856        }
12857        /**
12858         * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
12859         */
12860        public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder() {
12861          return jid_;
12862        }
12863    
12864        // required uint64 sinceTxId = 2;
12865        public static final int SINCETXID_FIELD_NUMBER = 2;
12866        private long sinceTxId_;
12867        /**
12868         * <code>required uint64 sinceTxId = 2;</code>
12869         *
12870         * <pre>
12871         * Transaction ID
12872         * </pre>
12873         */
12874        public boolean hasSinceTxId() {
12875          return ((bitField0_ & 0x00000002) == 0x00000002);
12876        }
12877        /**
12878         * <code>required uint64 sinceTxId = 2;</code>
12879         *
12880         * <pre>
12881         * Transaction ID
12882         * </pre>
12883         */
12884        public long getSinceTxId() {
12885          return sinceTxId_;
12886        }
12887    
12888        // optional bool forReading = 3 [default = true];
12889        public static final int FORREADING_FIELD_NUMBER = 3;
12890        private boolean forReading_;
12891        /**
12892         * <code>optional bool forReading = 3 [default = true];</code>
12893         *
12894         * <pre>
12895         * Whether or not the client will be reading from the returned streams.
12896         * </pre>
12897         */
12898        public boolean hasForReading() {
12899          return ((bitField0_ & 0x00000004) == 0x00000004);
12900        }
12901        /**
12902         * <code>optional bool forReading = 3 [default = true];</code>
12903         *
12904         * <pre>
12905         * Whether or not the client will be reading from the returned streams.
12906         * </pre>
12907         */
12908        public boolean getForReading() {
12909          return forReading_;
12910        }
12911    
12912        private void initFields() {
12913          jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
12914          sinceTxId_ = 0L;
12915          forReading_ = true;
12916        }
12917        private byte memoizedIsInitialized = -1;
12918        public final boolean isInitialized() {
12919          byte isInitialized = memoizedIsInitialized;
12920          if (isInitialized != -1) return isInitialized == 1;
12921    
12922          if (!hasJid()) {
12923            memoizedIsInitialized = 0;
12924            return false;
12925          }
12926          if (!hasSinceTxId()) {
12927            memoizedIsInitialized = 0;
12928            return false;
12929          }
12930          if (!getJid().isInitialized()) {
12931            memoizedIsInitialized = 0;
12932            return false;
12933          }
12934          memoizedIsInitialized = 1;
12935          return true;
12936        }
12937    
12938        public void writeTo(com.google.protobuf.CodedOutputStream output)
12939                            throws java.io.IOException {
12940          getSerializedSize();
12941          if (((bitField0_ & 0x00000001) == 0x00000001)) {
12942            output.writeMessage(1, jid_);
12943          }
12944          if (((bitField0_ & 0x00000002) == 0x00000002)) {
12945            output.writeUInt64(2, sinceTxId_);
12946          }
12947          if (((bitField0_ & 0x00000004) == 0x00000004)) {
12948            output.writeBool(3, forReading_);
12949          }
12950          getUnknownFields().writeTo(output);
12951        }
12952    
12953        private int memoizedSerializedSize = -1;
12954        public int getSerializedSize() {
12955          int size = memoizedSerializedSize;
12956          if (size != -1) return size;
12957    
12958          size = 0;
12959          if (((bitField0_ & 0x00000001) == 0x00000001)) {
12960            size += com.google.protobuf.CodedOutputStream
12961              .computeMessageSize(1, jid_);
12962          }
12963          if (((bitField0_ & 0x00000002) == 0x00000002)) {
12964            size += com.google.protobuf.CodedOutputStream
12965              .computeUInt64Size(2, sinceTxId_);
12966          }
12967          if (((bitField0_ & 0x00000004) == 0x00000004)) {
12968            size += com.google.protobuf.CodedOutputStream
12969              .computeBoolSize(3, forReading_);
12970          }
12971          size += getUnknownFields().getSerializedSize();
12972          memoizedSerializedSize = size;
12973          return size;
12974        }
12975    
12976        private static final long serialVersionUID = 0L;
12977        @java.lang.Override
12978        protected java.lang.Object writeReplace()
12979            throws java.io.ObjectStreamException {
12980          return super.writeReplace();
12981        }
12982    
12983        @java.lang.Override
12984        public boolean equals(final java.lang.Object obj) {
12985          if (obj == this) {
12986           return true;
12987          }
12988          if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto)) {
12989            return super.equals(obj);
12990          }
12991          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto) obj;
12992    
12993          boolean result = true;
12994          result = result && (hasJid() == other.hasJid());
12995          if (hasJid()) {
12996            result = result && getJid()
12997                .equals(other.getJid());
12998          }
12999          result = result && (hasSinceTxId() == other.hasSinceTxId());
13000          if (hasSinceTxId()) {
13001            result = result && (getSinceTxId()
13002                == other.getSinceTxId());
13003          }
13004          result = result && (hasForReading() == other.hasForReading());
13005          if (hasForReading()) {
13006            result = result && (getForReading()
13007                == other.getForReading());
13008          }
13009          result = result &&
13010              getUnknownFields().equals(other.getUnknownFields());
13011          return result;
13012        }
13013    
13014        private int memoizedHashCode = 0;
13015        @java.lang.Override
13016        public int hashCode() {
13017          if (memoizedHashCode != 0) {
13018            return memoizedHashCode;
13019          }
13020          int hash = 41;
13021          hash = (19 * hash) + getDescriptorForType().hashCode();
13022          if (hasJid()) {
13023            hash = (37 * hash) + JID_FIELD_NUMBER;
13024            hash = (53 * hash) + getJid().hashCode();
13025          }
13026          if (hasSinceTxId()) {
13027            hash = (37 * hash) + SINCETXID_FIELD_NUMBER;
13028            hash = (53 * hash) + hashLong(getSinceTxId());
13029          }
13030          if (hasForReading()) {
13031            hash = (37 * hash) + FORREADING_FIELD_NUMBER;
13032            hash = (53 * hash) + hashBoolean(getForReading());
13033          }
13034          hash = (29 * hash) + getUnknownFields().hashCode();
13035          memoizedHashCode = hash;
13036          return hash;
13037        }
13038    
13039        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto parseFrom(
13040            com.google.protobuf.ByteString data)
13041            throws com.google.protobuf.InvalidProtocolBufferException {
13042          return PARSER.parseFrom(data);
13043        }
13044        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto parseFrom(
13045            com.google.protobuf.ByteString data,
13046            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
13047            throws com.google.protobuf.InvalidProtocolBufferException {
13048          return PARSER.parseFrom(data, extensionRegistry);
13049        }
13050        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto parseFrom(byte[] data)
13051            throws com.google.protobuf.InvalidProtocolBufferException {
13052          return PARSER.parseFrom(data);
13053        }
13054        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto parseFrom(
13055            byte[] data,
13056            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
13057            throws com.google.protobuf.InvalidProtocolBufferException {
13058          return PARSER.parseFrom(data, extensionRegistry);
13059        }
13060        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto parseFrom(java.io.InputStream input)
13061            throws java.io.IOException {
13062          return PARSER.parseFrom(input);
13063        }
13064        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto parseFrom(
13065            java.io.InputStream input,
13066            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
13067            throws java.io.IOException {
13068          return PARSER.parseFrom(input, extensionRegistry);
13069        }
13070        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto parseDelimitedFrom(java.io.InputStream input)
13071            throws java.io.IOException {
13072          return PARSER.parseDelimitedFrom(input);
13073        }
13074        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto parseDelimitedFrom(
13075            java.io.InputStream input,
13076            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
13077            throws java.io.IOException {
13078          return PARSER.parseDelimitedFrom(input, extensionRegistry);
13079        }
13080        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto parseFrom(
13081            com.google.protobuf.CodedInputStream input)
13082            throws java.io.IOException {
13083          return PARSER.parseFrom(input);
13084        }
13085        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto parseFrom(
13086            com.google.protobuf.CodedInputStream input,
13087            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
13088            throws java.io.IOException {
13089          return PARSER.parseFrom(input, extensionRegistry);
13090        }
13091    
13092        public static Builder newBuilder() { return Builder.create(); }
13093        public Builder newBuilderForType() { return newBuilder(); }
13094        public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto prototype) {
13095          return newBuilder().mergeFrom(prototype);
13096        }
13097        public Builder toBuilder() { return newBuilder(this); }
13098    
13099        @java.lang.Override
13100        protected Builder newBuilderForType(
13101            com.google.protobuf.GeneratedMessage.BuilderParent parent) {
13102          Builder builder = new Builder(parent);
13103          return builder;
13104        }
13105        /**
13106         * Protobuf type {@code hadoop.hdfs.GetEditLogManifestRequestProto}
13107         *
13108         * <pre>
13109         **
13110         * getEditLogManifest()
13111         * </pre>
13112         */
13113        public static final class Builder extends
13114            com.google.protobuf.GeneratedMessage.Builder<Builder>
13115           implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProtoOrBuilder {
13116          public static final com.google.protobuf.Descriptors.Descriptor
13117              getDescriptor() {
13118            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_GetEditLogManifestRequestProto_descriptor;
13119          }
13120    
13121          protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
13122              internalGetFieldAccessorTable() {
13123            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_GetEditLogManifestRequestProto_fieldAccessorTable
13124                .ensureFieldAccessorsInitialized(
13125                    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto.Builder.class);
13126          }
13127    
13128          // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto.newBuilder()
13129          private Builder() {
13130            maybeForceBuilderInitialization();
13131          }
13132    
13133          private Builder(
13134              com.google.protobuf.GeneratedMessage.BuilderParent parent) {
13135            super(parent);
13136            maybeForceBuilderInitialization();
13137          }
13138          private void maybeForceBuilderInitialization() {
13139            if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
13140              getJidFieldBuilder();
13141            }
13142          }
13143          private static Builder create() {
13144            return new Builder();
13145          }
13146    
13147          public Builder clear() {
13148            super.clear();
13149            if (jidBuilder_ == null) {
13150              jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
13151            } else {
13152              jidBuilder_.clear();
13153            }
13154            bitField0_ = (bitField0_ & ~0x00000001);
13155            sinceTxId_ = 0L;
13156            bitField0_ = (bitField0_ & ~0x00000002);
13157            forReading_ = true;
13158            bitField0_ = (bitField0_ & ~0x00000004);
13159            return this;
13160          }
13161    
13162          public Builder clone() {
13163            return create().mergeFrom(buildPartial());
13164          }
13165    
13166          public com.google.protobuf.Descriptors.Descriptor
13167              getDescriptorForType() {
13168            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_GetEditLogManifestRequestProto_descriptor;
13169          }
13170    
13171          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto getDefaultInstanceForType() {
13172            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto.getDefaultInstance();
13173          }
13174    
13175          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto build() {
13176            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto result = buildPartial();
13177            if (!result.isInitialized()) {
13178              throw newUninitializedMessageException(result);
13179            }
13180            return result;
13181          }
13182    
13183          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto buildPartial() {
13184            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto(this);
13185            int from_bitField0_ = bitField0_;
13186            int to_bitField0_ = 0;
13187            if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
13188              to_bitField0_ |= 0x00000001;
13189            }
13190            if (jidBuilder_ == null) {
13191              result.jid_ = jid_;
13192            } else {
13193              result.jid_ = jidBuilder_.build();
13194            }
13195            if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
13196              to_bitField0_ |= 0x00000002;
13197            }
13198            result.sinceTxId_ = sinceTxId_;
13199            if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
13200              to_bitField0_ |= 0x00000004;
13201            }
13202            result.forReading_ = forReading_;
13203            result.bitField0_ = to_bitField0_;
13204            onBuilt();
13205            return result;
13206          }
13207    
13208          public Builder mergeFrom(com.google.protobuf.Message other) {
13209            if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto) {
13210              return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto)other);
13211            } else {
13212              super.mergeFrom(other);
13213              return this;
13214            }
13215          }
13216    
13217          public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto other) {
13218            if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto.getDefaultInstance()) return this;
13219            if (other.hasJid()) {
13220              mergeJid(other.getJid());
13221            }
13222            if (other.hasSinceTxId()) {
13223              setSinceTxId(other.getSinceTxId());
13224            }
13225            if (other.hasForReading()) {
13226              setForReading(other.getForReading());
13227            }
13228            this.mergeUnknownFields(other.getUnknownFields());
13229            return this;
13230          }
13231    
13232          public final boolean isInitialized() {
13233            if (!hasJid()) {
13234              
13235              return false;
13236            }
13237            if (!hasSinceTxId()) {
13238              
13239              return false;
13240            }
13241            if (!getJid().isInitialized()) {
13242              
13243              return false;
13244            }
13245            return true;
13246          }
13247    
13248          public Builder mergeFrom(
13249              com.google.protobuf.CodedInputStream input,
13250              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
13251              throws java.io.IOException {
13252            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto parsedMessage = null;
13253            try {
13254              parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
13255            } catch (com.google.protobuf.InvalidProtocolBufferException e) {
13256              parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto) e.getUnfinishedMessage();
13257              throw e;
13258            } finally {
13259              if (parsedMessage != null) {
13260                mergeFrom(parsedMessage);
13261              }
13262            }
13263            return this;
13264          }
13265          private int bitField0_;
13266    
13267          // required .hadoop.hdfs.JournalIdProto jid = 1;
13268          private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
13269          private com.google.protobuf.SingleFieldBuilder<
13270              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder> jidBuilder_;
13271          /**
13272           * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
13273           */
13274          public boolean hasJid() {
13275            return ((bitField0_ & 0x00000001) == 0x00000001);
13276          }
13277          /**
13278           * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
13279           */
13280          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid() {
13281            if (jidBuilder_ == null) {
13282              return jid_;
13283            } else {
13284              return jidBuilder_.getMessage();
13285            }
13286          }
13287          /**
13288           * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
13289           */
13290          public Builder setJid(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto value) {
13291            if (jidBuilder_ == null) {
13292              if (value == null) {
13293                throw new NullPointerException();
13294              }
13295              jid_ = value;
13296              onChanged();
13297            } else {
13298              jidBuilder_.setMessage(value);
13299            }
13300            bitField0_ |= 0x00000001;
13301            return this;
13302          }
13303          /**
13304           * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
13305           */
13306          public Builder setJid(
13307              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder builderForValue) {
13308            if (jidBuilder_ == null) {
13309              jid_ = builderForValue.build();
13310              onChanged();
13311            } else {
13312              jidBuilder_.setMessage(builderForValue.build());
13313            }
13314            bitField0_ |= 0x00000001;
13315            return this;
13316          }
13317          /**
13318           * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
13319           */
13320          public Builder mergeJid(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto value) {
13321            if (jidBuilder_ == null) {
13322              if (((bitField0_ & 0x00000001) == 0x00000001) &&
13323                  jid_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance()) {
13324                jid_ =
13325                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.newBuilder(jid_).mergeFrom(value).buildPartial();
13326              } else {
13327                jid_ = value;
13328              }
13329              onChanged();
13330            } else {
13331              jidBuilder_.mergeFrom(value);
13332            }
13333            bitField0_ |= 0x00000001;
13334            return this;
13335          }
13336          /**
13337           * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
13338           */
13339          public Builder clearJid() {
13340            if (jidBuilder_ == null) {
13341              jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
13342              onChanged();
13343            } else {
13344              jidBuilder_.clear();
13345            }
13346            bitField0_ = (bitField0_ & ~0x00000001);
13347            return this;
13348          }
13349          /**
13350           * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
13351           */
13352          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder getJidBuilder() {
13353            bitField0_ |= 0x00000001;
13354            onChanged();
13355            return getJidFieldBuilder().getBuilder();
13356          }
13357          /**
13358           * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
13359           */
13360          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder() {
13361            if (jidBuilder_ != null) {
13362              return jidBuilder_.getMessageOrBuilder();
13363            } else {
13364              return jid_;
13365            }
13366          }
13367          /**
13368           * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
13369           */
13370          private com.google.protobuf.SingleFieldBuilder<
13371              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder> 
13372              getJidFieldBuilder() {
13373            if (jidBuilder_ == null) {
13374              jidBuilder_ = new com.google.protobuf.SingleFieldBuilder<
13375                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder>(
13376                      jid_,
13377                      getParentForChildren(),
13378                      isClean());
13379              jid_ = null;
13380            }
13381            return jidBuilder_;
13382          }
13383    
13384          // required uint64 sinceTxId = 2;
13385          private long sinceTxId_ ;
13386          /**
13387           * <code>required uint64 sinceTxId = 2;</code>
13388           *
13389           * <pre>
13390           * Transaction ID
13391           * </pre>
13392           */
13393          public boolean hasSinceTxId() {
13394            return ((bitField0_ & 0x00000002) == 0x00000002);
13395          }
13396          /**
13397           * <code>required uint64 sinceTxId = 2;</code>
13398           *
13399           * <pre>
13400           * Transaction ID
13401           * </pre>
13402           */
13403          public long getSinceTxId() {
13404            return sinceTxId_;
13405          }
13406          /**
13407           * <code>required uint64 sinceTxId = 2;</code>
13408           *
13409           * <pre>
13410           * Transaction ID
13411           * </pre>
13412           */
13413          public Builder setSinceTxId(long value) {
13414            bitField0_ |= 0x00000002;
13415            sinceTxId_ = value;
13416            onChanged();
13417            return this;
13418          }
13419          /**
13420           * <code>required uint64 sinceTxId = 2;</code>
13421           *
13422           * <pre>
13423           * Transaction ID
13424           * </pre>
13425           */
13426          public Builder clearSinceTxId() {
13427            bitField0_ = (bitField0_ & ~0x00000002);
13428            sinceTxId_ = 0L;
13429            onChanged();
13430            return this;
13431          }
13432    
13433          // optional bool forReading = 3 [default = true];
13434          private boolean forReading_ = true;
13435          /**
13436           * <code>optional bool forReading = 3 [default = true];</code>
13437           *
13438           * <pre>
13439           * Whether or not the client will be reading from the returned streams.
13440           * </pre>
13441           */
13442          public boolean hasForReading() {
13443            return ((bitField0_ & 0x00000004) == 0x00000004);
13444          }
13445          /**
13446           * <code>optional bool forReading = 3 [default = true];</code>
13447           *
13448           * <pre>
13449           * Whether or not the client will be reading from the returned streams.
13450           * </pre>
13451           */
13452          public boolean getForReading() {
13453            return forReading_;
13454          }
13455          /**
13456           * <code>optional bool forReading = 3 [default = true];</code>
13457           *
13458           * <pre>
13459           * Whether or not the client will be reading from the returned streams.
13460           * </pre>
13461           */
13462          public Builder setForReading(boolean value) {
13463            bitField0_ |= 0x00000004;
13464            forReading_ = value;
13465            onChanged();
13466            return this;
13467          }
13468          /**
13469           * <code>optional bool forReading = 3 [default = true];</code>
13470           *
13471           * <pre>
13472           * Whether or not the client will be reading from the returned streams.
13473           * </pre>
13474           */
13475          public Builder clearForReading() {
13476            bitField0_ = (bitField0_ & ~0x00000004);
13477            forReading_ = true;
13478            onChanged();
13479            return this;
13480          }
13481    
13482          // @@protoc_insertion_point(builder_scope:hadoop.hdfs.GetEditLogManifestRequestProto)
13483        }
13484    
13485        static {
13486          defaultInstance = new GetEditLogManifestRequestProto(true);
13487          defaultInstance.initFields();
13488        }
13489    
13490        // @@protoc_insertion_point(class_scope:hadoop.hdfs.GetEditLogManifestRequestProto)
13491      }
13492    
13493      public interface GetEditLogManifestResponseProtoOrBuilder
13494          extends com.google.protobuf.MessageOrBuilder {
13495    
13496        // required .hadoop.hdfs.RemoteEditLogManifestProto manifest = 1;
13497        /**
13498         * <code>required .hadoop.hdfs.RemoteEditLogManifestProto manifest = 1;</code>
13499         */
13500        boolean hasManifest();
13501        /**
13502         * <code>required .hadoop.hdfs.RemoteEditLogManifestProto manifest = 1;</code>
13503         */
13504        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto getManifest();
13505        /**
13506         * <code>required .hadoop.hdfs.RemoteEditLogManifestProto manifest = 1;</code>
13507         */
13508        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProtoOrBuilder getManifestOrBuilder();
13509    
13510        // required uint32 httpPort = 2;
13511        /**
13512         * <code>required uint32 httpPort = 2;</code>
13513         */
13514        boolean hasHttpPort();
13515        /**
13516         * <code>required uint32 httpPort = 2;</code>
13517         */
13518        int getHttpPort();
13519      }
13520      /**
13521       * Protobuf type {@code hadoop.hdfs.GetEditLogManifestResponseProto}
13522       */
13523      public static final class GetEditLogManifestResponseProto extends
13524          com.google.protobuf.GeneratedMessage
13525          implements GetEditLogManifestResponseProtoOrBuilder {
13526        // Use GetEditLogManifestResponseProto.newBuilder() to construct.
13527        private GetEditLogManifestResponseProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
13528          super(builder);
13529          this.unknownFields = builder.getUnknownFields();
13530        }
13531        private GetEditLogManifestResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
13532    
13533        private static final GetEditLogManifestResponseProto defaultInstance;
13534        public static GetEditLogManifestResponseProto getDefaultInstance() {
13535          return defaultInstance;
13536        }
13537    
13538        public GetEditLogManifestResponseProto getDefaultInstanceForType() {
13539          return defaultInstance;
13540        }
13541    
13542        private final com.google.protobuf.UnknownFieldSet unknownFields;
13543        @java.lang.Override
13544        public final com.google.protobuf.UnknownFieldSet
13545            getUnknownFields() {
13546          return this.unknownFields;
13547        }
13548        private GetEditLogManifestResponseProto(
13549            com.google.protobuf.CodedInputStream input,
13550            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
13551            throws com.google.protobuf.InvalidProtocolBufferException {
13552          initFields();
13553          int mutable_bitField0_ = 0;
13554          com.google.protobuf.UnknownFieldSet.Builder unknownFields =
13555              com.google.protobuf.UnknownFieldSet.newBuilder();
13556          try {
13557            boolean done = false;
13558            while (!done) {
13559              int tag = input.readTag();
13560              switch (tag) {
13561                case 0:
13562                  done = true;
13563                  break;
13564                default: {
13565                  if (!parseUnknownField(input, unknownFields,
13566                                         extensionRegistry, tag)) {
13567                    done = true;
13568                  }
13569                  break;
13570                }
13571                case 10: {
13572                  org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto.Builder subBuilder = null;
13573                  if (((bitField0_ & 0x00000001) == 0x00000001)) {
13574                    subBuilder = manifest_.toBuilder();
13575                  }
13576                  manifest_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto.PARSER, extensionRegistry);
13577                  if (subBuilder != null) {
13578                    subBuilder.mergeFrom(manifest_);
13579                    manifest_ = subBuilder.buildPartial();
13580                  }
13581                  bitField0_ |= 0x00000001;
13582                  break;
13583                }
13584                case 16: {
13585                  bitField0_ |= 0x00000002;
13586                  httpPort_ = input.readUInt32();
13587                  break;
13588                }
13589              }
13590            }
13591          } catch (com.google.protobuf.InvalidProtocolBufferException e) {
13592            throw e.setUnfinishedMessage(this);
13593          } catch (java.io.IOException e) {
13594            throw new com.google.protobuf.InvalidProtocolBufferException(
13595                e.getMessage()).setUnfinishedMessage(this);
13596          } finally {
13597            this.unknownFields = unknownFields.build();
13598            makeExtensionsImmutable();
13599          }
13600        }
13601        public static final com.google.protobuf.Descriptors.Descriptor
13602            getDescriptor() {
13603          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_GetEditLogManifestResponseProto_descriptor;
13604        }
13605    
13606        protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
13607            internalGetFieldAccessorTable() {
13608          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_GetEditLogManifestResponseProto_fieldAccessorTable
13609              .ensureFieldAccessorsInitialized(
13610                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto.Builder.class);
13611        }
13612    
13613        public static com.google.protobuf.Parser<GetEditLogManifestResponseProto> PARSER =
13614            new com.google.protobuf.AbstractParser<GetEditLogManifestResponseProto>() {
13615          public GetEditLogManifestResponseProto parsePartialFrom(
13616              com.google.protobuf.CodedInputStream input,
13617              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
13618              throws com.google.protobuf.InvalidProtocolBufferException {
13619            return new GetEditLogManifestResponseProto(input, extensionRegistry);
13620          }
13621        };
13622    
13623        @java.lang.Override
13624        public com.google.protobuf.Parser<GetEditLogManifestResponseProto> getParserForType() {
13625          return PARSER;
13626        }
13627    
13628        private int bitField0_;
13629        // required .hadoop.hdfs.RemoteEditLogManifestProto manifest = 1;
13630        public static final int MANIFEST_FIELD_NUMBER = 1;
13631        private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto manifest_;
13632        /**
13633         * <code>required .hadoop.hdfs.RemoteEditLogManifestProto manifest = 1;</code>
13634         */
13635        public boolean hasManifest() {
13636          return ((bitField0_ & 0x00000001) == 0x00000001);
13637        }
13638        /**
13639         * <code>required .hadoop.hdfs.RemoteEditLogManifestProto manifest = 1;</code>
13640         */
13641        public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto getManifest() {
13642          return manifest_;
13643        }
13644        /**
13645         * <code>required .hadoop.hdfs.RemoteEditLogManifestProto manifest = 1;</code>
13646         */
13647        public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProtoOrBuilder getManifestOrBuilder() {
13648          return manifest_;
13649        }
13650    
13651        // required uint32 httpPort = 2;
13652        public static final int HTTPPORT_FIELD_NUMBER = 2;
13653        private int httpPort_;
13654        /**
13655         * <code>required uint32 httpPort = 2;</code>
13656         */
13657        public boolean hasHttpPort() {
13658          return ((bitField0_ & 0x00000002) == 0x00000002);
13659        }
13660        /**
13661         * <code>required uint32 httpPort = 2;</code>
13662         */
13663        public int getHttpPort() {
13664          return httpPort_;
13665        }
13666    
13667        private void initFields() {
13668          manifest_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto.getDefaultInstance();
13669          httpPort_ = 0;
13670        }
13671        private byte memoizedIsInitialized = -1;
13672        public final boolean isInitialized() {
13673          byte isInitialized = memoizedIsInitialized;
13674          if (isInitialized != -1) return isInitialized == 1;
13675    
13676          if (!hasManifest()) {
13677            memoizedIsInitialized = 0;
13678            return false;
13679          }
13680          if (!hasHttpPort()) {
13681            memoizedIsInitialized = 0;
13682            return false;
13683          }
13684          if (!getManifest().isInitialized()) {
13685            memoizedIsInitialized = 0;
13686            return false;
13687          }
13688          memoizedIsInitialized = 1;
13689          return true;
13690        }
13691    
13692        public void writeTo(com.google.protobuf.CodedOutputStream output)
13693                            throws java.io.IOException {
13694          getSerializedSize();
13695          if (((bitField0_ & 0x00000001) == 0x00000001)) {
13696            output.writeMessage(1, manifest_);
13697          }
13698          if (((bitField0_ & 0x00000002) == 0x00000002)) {
13699            output.writeUInt32(2, httpPort_);
13700          }
13701          getUnknownFields().writeTo(output);
13702        }
13703    
13704        private int memoizedSerializedSize = -1;
13705        public int getSerializedSize() {
13706          int size = memoizedSerializedSize;
13707          if (size != -1) return size;
13708    
13709          size = 0;
13710          if (((bitField0_ & 0x00000001) == 0x00000001)) {
13711            size += com.google.protobuf.CodedOutputStream
13712              .computeMessageSize(1, manifest_);
13713          }
13714          if (((bitField0_ & 0x00000002) == 0x00000002)) {
13715            size += com.google.protobuf.CodedOutputStream
13716              .computeUInt32Size(2, httpPort_);
13717          }
13718          size += getUnknownFields().getSerializedSize();
13719          memoizedSerializedSize = size;
13720          return size;
13721        }
13722    
13723        private static final long serialVersionUID = 0L;
13724        @java.lang.Override
13725        protected java.lang.Object writeReplace()
13726            throws java.io.ObjectStreamException {
13727          return super.writeReplace();
13728        }
13729    
13730        @java.lang.Override
13731        public boolean equals(final java.lang.Object obj) {
13732          if (obj == this) {
13733           return true;
13734          }
13735          if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto)) {
13736            return super.equals(obj);
13737          }
13738          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto) obj;
13739    
13740          boolean result = true;
13741          result = result && (hasManifest() == other.hasManifest());
13742          if (hasManifest()) {
13743            result = result && getManifest()
13744                .equals(other.getManifest());
13745          }
13746          result = result && (hasHttpPort() == other.hasHttpPort());
13747          if (hasHttpPort()) {
13748            result = result && (getHttpPort()
13749                == other.getHttpPort());
13750          }
13751          result = result &&
13752              getUnknownFields().equals(other.getUnknownFields());
13753          return result;
13754        }
13755    
13756        private int memoizedHashCode = 0;
13757        @java.lang.Override
13758        public int hashCode() {
13759          if (memoizedHashCode != 0) {
13760            return memoizedHashCode;
13761          }
13762          int hash = 41;
13763          hash = (19 * hash) + getDescriptorForType().hashCode();
13764          if (hasManifest()) {
13765            hash = (37 * hash) + MANIFEST_FIELD_NUMBER;
13766            hash = (53 * hash) + getManifest().hashCode();
13767          }
13768          if (hasHttpPort()) {
13769            hash = (37 * hash) + HTTPPORT_FIELD_NUMBER;
13770            hash = (53 * hash) + getHttpPort();
13771          }
13772          hash = (29 * hash) + getUnknownFields().hashCode();
13773          memoizedHashCode = hash;
13774          return hash;
13775        }
13776    
13777        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto parseFrom(
13778            com.google.protobuf.ByteString data)
13779            throws com.google.protobuf.InvalidProtocolBufferException {
13780          return PARSER.parseFrom(data);
13781        }
13782        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto parseFrom(
13783            com.google.protobuf.ByteString data,
13784            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
13785            throws com.google.protobuf.InvalidProtocolBufferException {
13786          return PARSER.parseFrom(data, extensionRegistry);
13787        }
13788        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto parseFrom(byte[] data)
13789            throws com.google.protobuf.InvalidProtocolBufferException {
13790          return PARSER.parseFrom(data);
13791        }
13792        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto parseFrom(
13793            byte[] data,
13794            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
13795            throws com.google.protobuf.InvalidProtocolBufferException {
13796          return PARSER.parseFrom(data, extensionRegistry);
13797        }
13798        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto parseFrom(java.io.InputStream input)
13799            throws java.io.IOException {
13800          return PARSER.parseFrom(input);
13801        }
13802        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto parseFrom(
13803            java.io.InputStream input,
13804            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
13805            throws java.io.IOException {
13806          return PARSER.parseFrom(input, extensionRegistry);
13807        }
13808        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto parseDelimitedFrom(java.io.InputStream input)
13809            throws java.io.IOException {
13810          return PARSER.parseDelimitedFrom(input);
13811        }
13812        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto parseDelimitedFrom(
13813            java.io.InputStream input,
13814            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
13815            throws java.io.IOException {
13816          return PARSER.parseDelimitedFrom(input, extensionRegistry);
13817        }
13818        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto parseFrom(
13819            com.google.protobuf.CodedInputStream input)
13820            throws java.io.IOException {
13821          return PARSER.parseFrom(input);
13822        }
13823        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto parseFrom(
13824            com.google.protobuf.CodedInputStream input,
13825            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
13826            throws java.io.IOException {
13827          return PARSER.parseFrom(input, extensionRegistry);
13828        }
13829    
13830        public static Builder newBuilder() { return Builder.create(); }
13831        public Builder newBuilderForType() { return newBuilder(); }
13832        public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto prototype) {
13833          return newBuilder().mergeFrom(prototype);
13834        }
13835        public Builder toBuilder() { return newBuilder(this); }
13836    
13837        @java.lang.Override
13838        protected Builder newBuilderForType(
13839            com.google.protobuf.GeneratedMessage.BuilderParent parent) {
13840          Builder builder = new Builder(parent);
13841          return builder;
13842        }
13843        /**
13844         * Protobuf type {@code hadoop.hdfs.GetEditLogManifestResponseProto}
13845         */
13846        public static final class Builder extends
13847            com.google.protobuf.GeneratedMessage.Builder<Builder>
13848           implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProtoOrBuilder {
13849          public static final com.google.protobuf.Descriptors.Descriptor
13850              getDescriptor() {
13851            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_GetEditLogManifestResponseProto_descriptor;
13852          }
13853    
13854          protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
13855              internalGetFieldAccessorTable() {
13856            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_GetEditLogManifestResponseProto_fieldAccessorTable
13857                .ensureFieldAccessorsInitialized(
13858                    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto.Builder.class);
13859          }
13860    
13861          // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto.newBuilder()
13862          private Builder() {
13863            maybeForceBuilderInitialization();
13864          }
13865    
13866          private Builder(
13867              com.google.protobuf.GeneratedMessage.BuilderParent parent) {
13868            super(parent);
13869            maybeForceBuilderInitialization();
13870          }
13871          private void maybeForceBuilderInitialization() {
13872            if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
13873              getManifestFieldBuilder();
13874            }
13875          }
13876          private static Builder create() {
13877            return new Builder();
13878          }
13879    
13880          public Builder clear() {
13881            super.clear();
13882            if (manifestBuilder_ == null) {
13883              manifest_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto.getDefaultInstance();
13884            } else {
13885              manifestBuilder_.clear();
13886            }
13887            bitField0_ = (bitField0_ & ~0x00000001);
13888            httpPort_ = 0;
13889            bitField0_ = (bitField0_ & ~0x00000002);
13890            return this;
13891          }
13892    
13893          public Builder clone() {
13894            return create().mergeFrom(buildPartial());
13895          }
13896    
13897          public com.google.protobuf.Descriptors.Descriptor
13898              getDescriptorForType() {
13899            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_GetEditLogManifestResponseProto_descriptor;
13900          }
13901    
13902          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto getDefaultInstanceForType() {
13903            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto.getDefaultInstance();
13904          }
13905    
13906          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto build() {
13907            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto result = buildPartial();
13908            if (!result.isInitialized()) {
13909              throw newUninitializedMessageException(result);
13910            }
13911            return result;
13912          }
13913    
13914          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto buildPartial() {
13915            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto(this);
13916            int from_bitField0_ = bitField0_;
13917            int to_bitField0_ = 0;
13918            if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
13919              to_bitField0_ |= 0x00000001;
13920            }
13921            if (manifestBuilder_ == null) {
13922              result.manifest_ = manifest_;
13923            } else {
13924              result.manifest_ = manifestBuilder_.build();
13925            }
13926            if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
13927              to_bitField0_ |= 0x00000002;
13928            }
13929            result.httpPort_ = httpPort_;
13930            result.bitField0_ = to_bitField0_;
13931            onBuilt();
13932            return result;
13933          }
13934    
13935          public Builder mergeFrom(com.google.protobuf.Message other) {
13936            if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto) {
13937              return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto)other);
13938            } else {
13939              super.mergeFrom(other);
13940              return this;
13941            }
13942          }
13943    
13944          public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto other) {
13945            if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto.getDefaultInstance()) return this;
13946            if (other.hasManifest()) {
13947              mergeManifest(other.getManifest());
13948            }
13949            if (other.hasHttpPort()) {
13950              setHttpPort(other.getHttpPort());
13951            }
13952            this.mergeUnknownFields(other.getUnknownFields());
13953            return this;
13954          }
13955    
13956          public final boolean isInitialized() {
13957            if (!hasManifest()) {
13958              
13959              return false;
13960            }
13961            if (!hasHttpPort()) {
13962              
13963              return false;
13964            }
13965            if (!getManifest().isInitialized()) {
13966              
13967              return false;
13968            }
13969            return true;
13970          }
13971    
13972          public Builder mergeFrom(
13973              com.google.protobuf.CodedInputStream input,
13974              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
13975              throws java.io.IOException {
13976            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto parsedMessage = null;
13977            try {
13978              parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
13979            } catch (com.google.protobuf.InvalidProtocolBufferException e) {
13980              parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto) e.getUnfinishedMessage();
13981              throw e;
13982            } finally {
13983              if (parsedMessage != null) {
13984                mergeFrom(parsedMessage);
13985              }
13986            }
13987            return this;
13988          }
13989          private int bitField0_;
13990    
13991          // required .hadoop.hdfs.RemoteEditLogManifestProto manifest = 1;
13992          private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto manifest_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto.getDefaultInstance();
13993          private com.google.protobuf.SingleFieldBuilder<
13994              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProtoOrBuilder> manifestBuilder_;
13995          /**
13996           * <code>required .hadoop.hdfs.RemoteEditLogManifestProto manifest = 1;</code>
13997           */
13998          public boolean hasManifest() {
13999            return ((bitField0_ & 0x00000001) == 0x00000001);
14000          }
14001          /**
14002           * <code>required .hadoop.hdfs.RemoteEditLogManifestProto manifest = 1;</code>
14003           */
14004          public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto getManifest() {
14005            if (manifestBuilder_ == null) {
14006              return manifest_;
14007            } else {
14008              return manifestBuilder_.getMessage();
14009            }
14010          }
14011          /**
14012           * <code>required .hadoop.hdfs.RemoteEditLogManifestProto manifest = 1;</code>
14013           */
14014          public Builder setManifest(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto value) {
14015            if (manifestBuilder_ == null) {
14016              if (value == null) {
14017                throw new NullPointerException();
14018              }
14019              manifest_ = value;
14020              onChanged();
14021            } else {
14022              manifestBuilder_.setMessage(value);
14023            }
14024            bitField0_ |= 0x00000001;
14025            return this;
14026          }
14027          /**
14028           * <code>required .hadoop.hdfs.RemoteEditLogManifestProto manifest = 1;</code>
14029           */
14030          public Builder setManifest(
14031              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto.Builder builderForValue) {
14032            if (manifestBuilder_ == null) {
14033              manifest_ = builderForValue.build();
14034              onChanged();
14035            } else {
14036              manifestBuilder_.setMessage(builderForValue.build());
14037            }
14038            bitField0_ |= 0x00000001;
14039            return this;
14040          }
14041          /**
14042           * <code>required .hadoop.hdfs.RemoteEditLogManifestProto manifest = 1;</code>
14043           */
14044          public Builder mergeManifest(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto value) {
14045            if (manifestBuilder_ == null) {
14046              if (((bitField0_ & 0x00000001) == 0x00000001) &&
14047                  manifest_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto.getDefaultInstance()) {
14048                manifest_ =
14049                  org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto.newBuilder(manifest_).mergeFrom(value).buildPartial();
14050              } else {
14051                manifest_ = value;
14052              }
14053              onChanged();
14054            } else {
14055              manifestBuilder_.mergeFrom(value);
14056            }
14057            bitField0_ |= 0x00000001;
14058            return this;
14059          }
14060          /**
14061           * <code>required .hadoop.hdfs.RemoteEditLogManifestProto manifest = 1;</code>
14062           */
14063          public Builder clearManifest() {
14064            if (manifestBuilder_ == null) {
14065              manifest_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto.getDefaultInstance();
14066              onChanged();
14067            } else {
14068              manifestBuilder_.clear();
14069            }
14070            bitField0_ = (bitField0_ & ~0x00000001);
14071            return this;
14072          }
14073          /**
14074           * <code>required .hadoop.hdfs.RemoteEditLogManifestProto manifest = 1;</code>
14075           */
14076          public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto.Builder getManifestBuilder() {
14077            bitField0_ |= 0x00000001;
14078            onChanged();
14079            return getManifestFieldBuilder().getBuilder();
14080          }
14081          /**
14082           * <code>required .hadoop.hdfs.RemoteEditLogManifestProto manifest = 1;</code>
14083           */
14084          public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProtoOrBuilder getManifestOrBuilder() {
14085            if (manifestBuilder_ != null) {
14086              return manifestBuilder_.getMessageOrBuilder();
14087            } else {
14088              return manifest_;
14089            }
14090          }
14091          /**
14092           * <code>required .hadoop.hdfs.RemoteEditLogManifestProto manifest = 1;</code>
14093           */
14094          private com.google.protobuf.SingleFieldBuilder<
14095              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProtoOrBuilder> 
14096              getManifestFieldBuilder() {
14097            if (manifestBuilder_ == null) {
14098              manifestBuilder_ = new com.google.protobuf.SingleFieldBuilder<
14099                  org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProtoOrBuilder>(
14100                      manifest_,
14101                      getParentForChildren(),
14102                      isClean());
14103              manifest_ = null;
14104            }
14105            return manifestBuilder_;
14106          }
14107    
14108          // required uint32 httpPort = 2;
14109          private int httpPort_ ;
14110          /**
14111           * <code>required uint32 httpPort = 2;</code>
14112           */
14113          public boolean hasHttpPort() {
14114            return ((bitField0_ & 0x00000002) == 0x00000002);
14115          }
14116          /**
14117           * <code>required uint32 httpPort = 2;</code>
14118           */
14119          public int getHttpPort() {
14120            return httpPort_;
14121          }
14122          /**
14123           * <code>required uint32 httpPort = 2;</code>
14124           */
14125          public Builder setHttpPort(int value) {
14126            bitField0_ |= 0x00000002;
14127            httpPort_ = value;
14128            onChanged();
14129            return this;
14130          }
14131          /**
14132           * <code>required uint32 httpPort = 2;</code>
14133           */
14134          public Builder clearHttpPort() {
14135            bitField0_ = (bitField0_ & ~0x00000002);
14136            httpPort_ = 0;
14137            onChanged();
14138            return this;
14139          }
14140    
14141          // @@protoc_insertion_point(builder_scope:hadoop.hdfs.GetEditLogManifestResponseProto)
14142        }
14143    
14144        static {
14145          defaultInstance = new GetEditLogManifestResponseProto(true);
14146          defaultInstance.initFields();
14147        }
14148    
14149        // @@protoc_insertion_point(class_scope:hadoop.hdfs.GetEditLogManifestResponseProto)
14150      }
14151    
14152      public interface PrepareRecoveryRequestProtoOrBuilder
14153          extends com.google.protobuf.MessageOrBuilder {
14154    
14155        // required .hadoop.hdfs.RequestInfoProto reqInfo = 1;
14156        /**
14157         * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
14158         */
14159        boolean hasReqInfo();
14160        /**
14161         * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
14162         */
14163        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo();
14164        /**
14165         * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
14166         */
14167        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder();
14168    
14169        // required uint64 segmentTxId = 2;
14170        /**
14171         * <code>required uint64 segmentTxId = 2;</code>
14172         */
14173        boolean hasSegmentTxId();
14174        /**
14175         * <code>required uint64 segmentTxId = 2;</code>
14176         */
14177        long getSegmentTxId();
14178      }
14179      /**
14180       * Protobuf type {@code hadoop.hdfs.PrepareRecoveryRequestProto}
14181       *
14182       * <pre>
14183       **
14184       * prepareRecovery()
14185       * </pre>
14186       */
14187      public static final class PrepareRecoveryRequestProto extends
14188          com.google.protobuf.GeneratedMessage
14189          implements PrepareRecoveryRequestProtoOrBuilder {
14190        // Use PrepareRecoveryRequestProto.newBuilder() to construct.
14191        private PrepareRecoveryRequestProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
14192          super(builder);
14193          this.unknownFields = builder.getUnknownFields();
14194        }
14195        private PrepareRecoveryRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
14196    
14197        private static final PrepareRecoveryRequestProto defaultInstance;
14198        public static PrepareRecoveryRequestProto getDefaultInstance() {
14199          return defaultInstance;
14200        }
14201    
14202        public PrepareRecoveryRequestProto getDefaultInstanceForType() {
14203          return defaultInstance;
14204        }
14205    
14206        private final com.google.protobuf.UnknownFieldSet unknownFields;
14207        @java.lang.Override
14208        public final com.google.protobuf.UnknownFieldSet
14209            getUnknownFields() {
14210          return this.unknownFields;
14211        }
14212        private PrepareRecoveryRequestProto(
14213            com.google.protobuf.CodedInputStream input,
14214            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
14215            throws com.google.protobuf.InvalidProtocolBufferException {
14216          initFields();
14217          int mutable_bitField0_ = 0;
14218          com.google.protobuf.UnknownFieldSet.Builder unknownFields =
14219              com.google.protobuf.UnknownFieldSet.newBuilder();
14220          try {
14221            boolean done = false;
14222            while (!done) {
14223              int tag = input.readTag();
14224              switch (tag) {
14225                case 0:
14226                  done = true;
14227                  break;
14228                default: {
14229                  if (!parseUnknownField(input, unknownFields,
14230                                         extensionRegistry, tag)) {
14231                    done = true;
14232                  }
14233                  break;
14234                }
14235                case 10: {
14236                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder subBuilder = null;
14237                  if (((bitField0_ & 0x00000001) == 0x00000001)) {
14238                    subBuilder = reqInfo_.toBuilder();
14239                  }
14240                  reqInfo_ = input.readMessage(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.PARSER, extensionRegistry);
14241                  if (subBuilder != null) {
14242                    subBuilder.mergeFrom(reqInfo_);
14243                    reqInfo_ = subBuilder.buildPartial();
14244                  }
14245                  bitField0_ |= 0x00000001;
14246                  break;
14247                }
14248                case 16: {
14249                  bitField0_ |= 0x00000002;
14250                  segmentTxId_ = input.readUInt64();
14251                  break;
14252                }
14253              }
14254            }
14255          } catch (com.google.protobuf.InvalidProtocolBufferException e) {
14256            throw e.setUnfinishedMessage(this);
14257          } catch (java.io.IOException e) {
14258            throw new com.google.protobuf.InvalidProtocolBufferException(
14259                e.getMessage()).setUnfinishedMessage(this);
14260          } finally {
14261            this.unknownFields = unknownFields.build();
14262            makeExtensionsImmutable();
14263          }
14264        }
14265        public static final com.google.protobuf.Descriptors.Descriptor
14266            getDescriptor() {
14267          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_PrepareRecoveryRequestProto_descriptor;
14268        }
14269    
14270        protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
14271            internalGetFieldAccessorTable() {
14272          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_PrepareRecoveryRequestProto_fieldAccessorTable
14273              .ensureFieldAccessorsInitialized(
14274                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto.Builder.class);
14275        }
14276    
14277        public static com.google.protobuf.Parser<PrepareRecoveryRequestProto> PARSER =
14278            new com.google.protobuf.AbstractParser<PrepareRecoveryRequestProto>() {
14279          public PrepareRecoveryRequestProto parsePartialFrom(
14280              com.google.protobuf.CodedInputStream input,
14281              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
14282              throws com.google.protobuf.InvalidProtocolBufferException {
14283            return new PrepareRecoveryRequestProto(input, extensionRegistry);
14284          }
14285        };
14286    
14287        @java.lang.Override
14288        public com.google.protobuf.Parser<PrepareRecoveryRequestProto> getParserForType() {
14289          return PARSER;
14290        }
14291    
14292        private int bitField0_;
14293        // required .hadoop.hdfs.RequestInfoProto reqInfo = 1;
14294        public static final int REQINFO_FIELD_NUMBER = 1;
14295        private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto reqInfo_;
14296        /**
14297         * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
14298         */
14299        public boolean hasReqInfo() {
14300          return ((bitField0_ & 0x00000001) == 0x00000001);
14301        }
14302        /**
14303         * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
14304         */
14305        public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo() {
14306          return reqInfo_;
14307        }
14308        /**
14309         * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
14310         */
14311        public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder() {
14312          return reqInfo_;
14313        }
14314    
14315        // required uint64 segmentTxId = 2;
14316        public static final int SEGMENTTXID_FIELD_NUMBER = 2;
14317        private long segmentTxId_;
14318        /**
14319         * <code>required uint64 segmentTxId = 2;</code>
14320         */
14321        public boolean hasSegmentTxId() {
14322          return ((bitField0_ & 0x00000002) == 0x00000002);
14323        }
14324        /**
14325         * <code>required uint64 segmentTxId = 2;</code>
14326         */
14327        public long getSegmentTxId() {
14328          return segmentTxId_;
14329        }
14330    
14331        private void initFields() {
14332          reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
14333          segmentTxId_ = 0L;
14334        }
14335        private byte memoizedIsInitialized = -1;
14336        public final boolean isInitialized() {
14337          byte isInitialized = memoizedIsInitialized;
14338          if (isInitialized != -1) return isInitialized == 1;
14339    
14340          if (!hasReqInfo()) {
14341            memoizedIsInitialized = 0;
14342            return false;
14343          }
14344          if (!hasSegmentTxId()) {
14345            memoizedIsInitialized = 0;
14346            return false;
14347          }
14348          if (!getReqInfo().isInitialized()) {
14349            memoizedIsInitialized = 0;
14350            return false;
14351          }
14352          memoizedIsInitialized = 1;
14353          return true;
14354        }
14355    
14356        public void writeTo(com.google.protobuf.CodedOutputStream output)
14357                            throws java.io.IOException {
14358          getSerializedSize();
14359          if (((bitField0_ & 0x00000001) == 0x00000001)) {
14360            output.writeMessage(1, reqInfo_);
14361          }
14362          if (((bitField0_ & 0x00000002) == 0x00000002)) {
14363            output.writeUInt64(2, segmentTxId_);
14364          }
14365          getUnknownFields().writeTo(output);
14366        }
14367    
14368        private int memoizedSerializedSize = -1;
14369        public int getSerializedSize() {
14370          int size = memoizedSerializedSize;
14371          if (size != -1) return size;
14372    
14373          size = 0;
14374          if (((bitField0_ & 0x00000001) == 0x00000001)) {
14375            size += com.google.protobuf.CodedOutputStream
14376              .computeMessageSize(1, reqInfo_);
14377          }
14378          if (((bitField0_ & 0x00000002) == 0x00000002)) {
14379            size += com.google.protobuf.CodedOutputStream
14380              .computeUInt64Size(2, segmentTxId_);
14381          }
14382          size += getUnknownFields().getSerializedSize();
14383          memoizedSerializedSize = size;
14384          return size;
14385        }
14386    
14387        private static final long serialVersionUID = 0L;
14388        @java.lang.Override
14389        protected java.lang.Object writeReplace()
14390            throws java.io.ObjectStreamException {
14391          return super.writeReplace();
14392        }
14393    
14394        @java.lang.Override
14395        public boolean equals(final java.lang.Object obj) {
14396          if (obj == this) {
14397           return true;
14398          }
14399          if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto)) {
14400            return super.equals(obj);
14401          }
14402          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto) obj;
14403    
14404          boolean result = true;
14405          result = result && (hasReqInfo() == other.hasReqInfo());
14406          if (hasReqInfo()) {
14407            result = result && getReqInfo()
14408                .equals(other.getReqInfo());
14409          }
14410          result = result && (hasSegmentTxId() == other.hasSegmentTxId());
14411          if (hasSegmentTxId()) {
14412            result = result && (getSegmentTxId()
14413                == other.getSegmentTxId());
14414          }
14415          result = result &&
14416              getUnknownFields().equals(other.getUnknownFields());
14417          return result;
14418        }
14419    
14420        private int memoizedHashCode = 0;
14421        @java.lang.Override
14422        public int hashCode() {
14423          if (memoizedHashCode != 0) {
14424            return memoizedHashCode;
14425          }
14426          int hash = 41;
14427          hash = (19 * hash) + getDescriptorForType().hashCode();
14428          if (hasReqInfo()) {
14429            hash = (37 * hash) + REQINFO_FIELD_NUMBER;
14430            hash = (53 * hash) + getReqInfo().hashCode();
14431          }
14432          if (hasSegmentTxId()) {
14433            hash = (37 * hash) + SEGMENTTXID_FIELD_NUMBER;
14434            hash = (53 * hash) + hashLong(getSegmentTxId());
14435          }
14436          hash = (29 * hash) + getUnknownFields().hashCode();
14437          memoizedHashCode = hash;
14438          return hash;
14439        }
14440    
14441        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto parseFrom(
14442            com.google.protobuf.ByteString data)
14443            throws com.google.protobuf.InvalidProtocolBufferException {
14444          return PARSER.parseFrom(data);
14445        }
14446        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto parseFrom(
14447            com.google.protobuf.ByteString data,
14448            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
14449            throws com.google.protobuf.InvalidProtocolBufferException {
14450          return PARSER.parseFrom(data, extensionRegistry);
14451        }
14452        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto parseFrom(byte[] data)
14453            throws com.google.protobuf.InvalidProtocolBufferException {
14454          return PARSER.parseFrom(data);
14455        }
14456        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto parseFrom(
14457            byte[] data,
14458            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
14459            throws com.google.protobuf.InvalidProtocolBufferException {
14460          return PARSER.parseFrom(data, extensionRegistry);
14461        }
14462        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto parseFrom(java.io.InputStream input)
14463            throws java.io.IOException {
14464          return PARSER.parseFrom(input);
14465        }
14466        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto parseFrom(
14467            java.io.InputStream input,
14468            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
14469            throws java.io.IOException {
14470          return PARSER.parseFrom(input, extensionRegistry);
14471        }
14472        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto parseDelimitedFrom(java.io.InputStream input)
14473            throws java.io.IOException {
14474          return PARSER.parseDelimitedFrom(input);
14475        }
14476        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto parseDelimitedFrom(
14477            java.io.InputStream input,
14478            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
14479            throws java.io.IOException {
14480          return PARSER.parseDelimitedFrom(input, extensionRegistry);
14481        }
14482        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto parseFrom(
14483            com.google.protobuf.CodedInputStream input)
14484            throws java.io.IOException {
14485          return PARSER.parseFrom(input);
14486        }
14487        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto parseFrom(
14488            com.google.protobuf.CodedInputStream input,
14489            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
14490            throws java.io.IOException {
14491          return PARSER.parseFrom(input, extensionRegistry);
14492        }
14493    
14494        public static Builder newBuilder() { return Builder.create(); }
14495        public Builder newBuilderForType() { return newBuilder(); }
14496        public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto prototype) {
14497          return newBuilder().mergeFrom(prototype);
14498        }
14499        public Builder toBuilder() { return newBuilder(this); }
14500    
14501        @java.lang.Override
14502        protected Builder newBuilderForType(
14503            com.google.protobuf.GeneratedMessage.BuilderParent parent) {
14504          Builder builder = new Builder(parent);
14505          return builder;
14506        }
14507        /**
14508         * Protobuf type {@code hadoop.hdfs.PrepareRecoveryRequestProto}
14509         *
14510         * <pre>
14511         **
14512         * prepareRecovery()
14513         * </pre>
14514         */
14515        public static final class Builder extends
14516            com.google.protobuf.GeneratedMessage.Builder<Builder>
14517           implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProtoOrBuilder {
14518          public static final com.google.protobuf.Descriptors.Descriptor
14519              getDescriptor() {
14520            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_PrepareRecoveryRequestProto_descriptor;
14521          }
14522    
14523          protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
14524              internalGetFieldAccessorTable() {
14525            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_PrepareRecoveryRequestProto_fieldAccessorTable
14526                .ensureFieldAccessorsInitialized(
14527                    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto.Builder.class);
14528          }
14529    
14530          // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto.newBuilder()
14531          private Builder() {
14532            maybeForceBuilderInitialization();
14533          }
14534    
14535          private Builder(
14536              com.google.protobuf.GeneratedMessage.BuilderParent parent) {
14537            super(parent);
14538            maybeForceBuilderInitialization();
14539          }
14540          private void maybeForceBuilderInitialization() {
14541            if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
14542              getReqInfoFieldBuilder();
14543            }
14544          }
14545          private static Builder create() {
14546            return new Builder();
14547          }
14548    
14549          public Builder clear() {
14550            super.clear();
14551            if (reqInfoBuilder_ == null) {
14552              reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
14553            } else {
14554              reqInfoBuilder_.clear();
14555            }
14556            bitField0_ = (bitField0_ & ~0x00000001);
14557            segmentTxId_ = 0L;
14558            bitField0_ = (bitField0_ & ~0x00000002);
14559            return this;
14560          }
14561    
14562          public Builder clone() {
14563            return create().mergeFrom(buildPartial());
14564          }
14565    
14566          public com.google.protobuf.Descriptors.Descriptor
14567              getDescriptorForType() {
14568            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_PrepareRecoveryRequestProto_descriptor;
14569          }
14570    
14571          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto getDefaultInstanceForType() {
14572            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto.getDefaultInstance();
14573          }
14574    
14575          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto build() {
14576            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto result = buildPartial();
14577            if (!result.isInitialized()) {
14578              throw newUninitializedMessageException(result);
14579            }
14580            return result;
14581          }
14582    
14583          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto buildPartial() {
14584            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto(this);
14585            int from_bitField0_ = bitField0_;
14586            int to_bitField0_ = 0;
14587            if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
14588              to_bitField0_ |= 0x00000001;
14589            }
14590            if (reqInfoBuilder_ == null) {
14591              result.reqInfo_ = reqInfo_;
14592            } else {
14593              result.reqInfo_ = reqInfoBuilder_.build();
14594            }
14595            if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
14596              to_bitField0_ |= 0x00000002;
14597            }
14598            result.segmentTxId_ = segmentTxId_;
14599            result.bitField0_ = to_bitField0_;
14600            onBuilt();
14601            return result;
14602          }
14603    
14604          public Builder mergeFrom(com.google.protobuf.Message other) {
14605            if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto) {
14606              return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto)other);
14607            } else {
14608              super.mergeFrom(other);
14609              return this;
14610            }
14611          }
14612    
14613          public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto other) {
14614            if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto.getDefaultInstance()) return this;
14615            if (other.hasReqInfo()) {
14616              mergeReqInfo(other.getReqInfo());
14617            }
14618            if (other.hasSegmentTxId()) {
14619              setSegmentTxId(other.getSegmentTxId());
14620            }
14621            this.mergeUnknownFields(other.getUnknownFields());
14622            return this;
14623          }
14624    
14625          public final boolean isInitialized() {
14626            if (!hasReqInfo()) {
14627              
14628              return false;
14629            }
14630            if (!hasSegmentTxId()) {
14631              
14632              return false;
14633            }
14634            if (!getReqInfo().isInitialized()) {
14635              
14636              return false;
14637            }
14638            return true;
14639          }
14640    
14641          public Builder mergeFrom(
14642              com.google.protobuf.CodedInputStream input,
14643              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
14644              throws java.io.IOException {
14645            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto parsedMessage = null;
14646            try {
14647              parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
14648            } catch (com.google.protobuf.InvalidProtocolBufferException e) {
14649              parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto) e.getUnfinishedMessage();
14650              throw e;
14651            } finally {
14652              if (parsedMessage != null) {
14653                mergeFrom(parsedMessage);
14654              }
14655            }
14656            return this;
14657          }
14658          private int bitField0_;
14659    
14660          // required .hadoop.hdfs.RequestInfoProto reqInfo = 1;
14661          private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
14662          private com.google.protobuf.SingleFieldBuilder<
14663              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder> reqInfoBuilder_;
14664          /**
14665           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
14666           */
14667          public boolean hasReqInfo() {
14668            return ((bitField0_ & 0x00000001) == 0x00000001);
14669          }
14670          /**
14671           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
14672           */
14673          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo() {
14674            if (reqInfoBuilder_ == null) {
14675              return reqInfo_;
14676            } else {
14677              return reqInfoBuilder_.getMessage();
14678            }
14679          }
14680          /**
14681           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
14682           */
14683          public Builder setReqInfo(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto value) {
14684            if (reqInfoBuilder_ == null) {
14685              if (value == null) {
14686                throw new NullPointerException();
14687              }
14688              reqInfo_ = value;
14689              onChanged();
14690            } else {
14691              reqInfoBuilder_.setMessage(value);
14692            }
14693            bitField0_ |= 0x00000001;
14694            return this;
14695          }
14696          /**
14697           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
14698           */
14699          public Builder setReqInfo(
14700              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder builderForValue) {
14701            if (reqInfoBuilder_ == null) {
14702              reqInfo_ = builderForValue.build();
14703              onChanged();
14704            } else {
14705              reqInfoBuilder_.setMessage(builderForValue.build());
14706            }
14707            bitField0_ |= 0x00000001;
14708            return this;
14709          }
14710          /**
14711           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
14712           */
14713          public Builder mergeReqInfo(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto value) {
14714            if (reqInfoBuilder_ == null) {
14715              if (((bitField0_ & 0x00000001) == 0x00000001) &&
14716                  reqInfo_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance()) {
14717                reqInfo_ =
14718                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.newBuilder(reqInfo_).mergeFrom(value).buildPartial();
14719              } else {
14720                reqInfo_ = value;
14721              }
14722              onChanged();
14723            } else {
14724              reqInfoBuilder_.mergeFrom(value);
14725            }
14726            bitField0_ |= 0x00000001;
14727            return this;
14728          }
14729          /**
14730           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
14731           */
14732          public Builder clearReqInfo() {
14733            if (reqInfoBuilder_ == null) {
14734              reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
14735              onChanged();
14736            } else {
14737              reqInfoBuilder_.clear();
14738            }
14739            bitField0_ = (bitField0_ & ~0x00000001);
14740            return this;
14741          }
14742          /**
14743           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
14744           */
14745          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder getReqInfoBuilder() {
14746            bitField0_ |= 0x00000001;
14747            onChanged();
14748            return getReqInfoFieldBuilder().getBuilder();
14749          }
14750          /**
14751           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
14752           */
14753          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder() {
14754            if (reqInfoBuilder_ != null) {
14755              return reqInfoBuilder_.getMessageOrBuilder();
14756            } else {
14757              return reqInfo_;
14758            }
14759          }
14760          /**
14761           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
14762           */
14763          private com.google.protobuf.SingleFieldBuilder<
14764              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder> 
14765              getReqInfoFieldBuilder() {
14766            if (reqInfoBuilder_ == null) {
14767              reqInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder<
14768                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder>(
14769                      reqInfo_,
14770                      getParentForChildren(),
14771                      isClean());
14772              reqInfo_ = null;
14773            }
14774            return reqInfoBuilder_;
14775          }
14776    
14777          // required uint64 segmentTxId = 2;
14778          private long segmentTxId_ ;
14779          /**
14780           * <code>required uint64 segmentTxId = 2;</code>
14781           */
14782          public boolean hasSegmentTxId() {
14783            return ((bitField0_ & 0x00000002) == 0x00000002);
14784          }
14785          /**
14786           * <code>required uint64 segmentTxId = 2;</code>
14787           */
14788          public long getSegmentTxId() {
14789            return segmentTxId_;
14790          }
14791          /**
14792           * <code>required uint64 segmentTxId = 2;</code>
14793           */
14794          public Builder setSegmentTxId(long value) {
14795            bitField0_ |= 0x00000002;
14796            segmentTxId_ = value;
14797            onChanged();
14798            return this;
14799          }
14800          /**
14801           * <code>required uint64 segmentTxId = 2;</code>
14802           */
14803          public Builder clearSegmentTxId() {
14804            bitField0_ = (bitField0_ & ~0x00000002);
14805            segmentTxId_ = 0L;
14806            onChanged();
14807            return this;
14808          }
14809    
14810          // @@protoc_insertion_point(builder_scope:hadoop.hdfs.PrepareRecoveryRequestProto)
14811        }
14812    
14813        static {
14814          defaultInstance = new PrepareRecoveryRequestProto(true);
14815          defaultInstance.initFields();
14816        }
14817    
14818        // @@protoc_insertion_point(class_scope:hadoop.hdfs.PrepareRecoveryRequestProto)
14819      }
14820    
14821      public interface PrepareRecoveryResponseProtoOrBuilder
14822          extends com.google.protobuf.MessageOrBuilder {
14823    
14824        // optional .hadoop.hdfs.SegmentStateProto segmentState = 1;
14825        /**
14826         * <code>optional .hadoop.hdfs.SegmentStateProto segmentState = 1;</code>
14827         */
14828        boolean hasSegmentState();
14829        /**
14830         * <code>optional .hadoop.hdfs.SegmentStateProto segmentState = 1;</code>
14831         */
14832        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto getSegmentState();
14833        /**
14834         * <code>optional .hadoop.hdfs.SegmentStateProto segmentState = 1;</code>
14835         */
14836        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder getSegmentStateOrBuilder();
14837    
14838        // optional uint64 acceptedInEpoch = 2;
14839        /**
14840         * <code>optional uint64 acceptedInEpoch = 2;</code>
14841         */
14842        boolean hasAcceptedInEpoch();
14843        /**
14844         * <code>optional uint64 acceptedInEpoch = 2;</code>
14845         */
14846        long getAcceptedInEpoch();
14847    
14848        // required uint64 lastWriterEpoch = 3;
14849        /**
14850         * <code>required uint64 lastWriterEpoch = 3;</code>
14851         */
14852        boolean hasLastWriterEpoch();
14853        /**
14854         * <code>required uint64 lastWriterEpoch = 3;</code>
14855         */
14856        long getLastWriterEpoch();
14857    
14858        // optional uint64 lastCommittedTxId = 4;
14859        /**
14860         * <code>optional uint64 lastCommittedTxId = 4;</code>
14861         *
14862         * <pre>
14863         * The highest committed txid that this logger has ever seen.
14864         * This may be higher than the data it actually has, in the case
14865         * that it was lagging before the old writer crashed.
14866         * </pre>
14867         */
14868        boolean hasLastCommittedTxId();
14869        /**
14870         * <code>optional uint64 lastCommittedTxId = 4;</code>
14871         *
14872         * <pre>
14873         * The highest committed txid that this logger has ever seen.
14874         * This may be higher than the data it actually has, in the case
14875         * that it was lagging before the old writer crashed.
14876         * </pre>
14877         */
14878        long getLastCommittedTxId();
14879      }
14880      /**
14881       * Protobuf type {@code hadoop.hdfs.PrepareRecoveryResponseProto}
14882       */
14883      public static final class PrepareRecoveryResponseProto extends
14884          com.google.protobuf.GeneratedMessage
14885          implements PrepareRecoveryResponseProtoOrBuilder {
14886        // Use PrepareRecoveryResponseProto.newBuilder() to construct.
14887        private PrepareRecoveryResponseProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
14888          super(builder);
14889          this.unknownFields = builder.getUnknownFields();
14890        }
14891        private PrepareRecoveryResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
14892    
14893        private static final PrepareRecoveryResponseProto defaultInstance;
14894        public static PrepareRecoveryResponseProto getDefaultInstance() {
14895          return defaultInstance;
14896        }
14897    
14898        public PrepareRecoveryResponseProto getDefaultInstanceForType() {
14899          return defaultInstance;
14900        }
14901    
14902        private final com.google.protobuf.UnknownFieldSet unknownFields;
14903        @java.lang.Override
14904        public final com.google.protobuf.UnknownFieldSet
14905            getUnknownFields() {
14906          return this.unknownFields;
14907        }
14908        private PrepareRecoveryResponseProto(
14909            com.google.protobuf.CodedInputStream input,
14910            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
14911            throws com.google.protobuf.InvalidProtocolBufferException {
14912          initFields();
14913          int mutable_bitField0_ = 0;
14914          com.google.protobuf.UnknownFieldSet.Builder unknownFields =
14915              com.google.protobuf.UnknownFieldSet.newBuilder();
14916          try {
14917            boolean done = false;
14918            while (!done) {
14919              int tag = input.readTag();
14920              switch (tag) {
14921                case 0:
14922                  done = true;
14923                  break;
14924                default: {
14925                  if (!parseUnknownField(input, unknownFields,
14926                                         extensionRegistry, tag)) {
14927                    done = true;
14928                  }
14929                  break;
14930                }
14931                case 10: {
14932                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder subBuilder = null;
14933                  if (((bitField0_ & 0x00000001) == 0x00000001)) {
14934                    subBuilder = segmentState_.toBuilder();
14935                  }
14936                  segmentState_ = input.readMessage(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.PARSER, extensionRegistry);
14937                  if (subBuilder != null) {
14938                    subBuilder.mergeFrom(segmentState_);
14939                    segmentState_ = subBuilder.buildPartial();
14940                  }
14941                  bitField0_ |= 0x00000001;
14942                  break;
14943                }
14944                case 16: {
14945                  bitField0_ |= 0x00000002;
14946                  acceptedInEpoch_ = input.readUInt64();
14947                  break;
14948                }
14949                case 24: {
14950                  bitField0_ |= 0x00000004;
14951                  lastWriterEpoch_ = input.readUInt64();
14952                  break;
14953                }
14954                case 32: {
14955                  bitField0_ |= 0x00000008;
14956                  lastCommittedTxId_ = input.readUInt64();
14957                  break;
14958                }
14959              }
14960            }
14961          } catch (com.google.protobuf.InvalidProtocolBufferException e) {
14962            throw e.setUnfinishedMessage(this);
14963          } catch (java.io.IOException e) {
14964            throw new com.google.protobuf.InvalidProtocolBufferException(
14965                e.getMessage()).setUnfinishedMessage(this);
14966          } finally {
14967            this.unknownFields = unknownFields.build();
14968            makeExtensionsImmutable();
14969          }
14970        }
14971        public static final com.google.protobuf.Descriptors.Descriptor
14972            getDescriptor() {
14973          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_PrepareRecoveryResponseProto_descriptor;
14974        }
14975    
14976        protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
14977            internalGetFieldAccessorTable() {
14978          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_PrepareRecoveryResponseProto_fieldAccessorTable
14979              .ensureFieldAccessorsInitialized(
14980                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto.Builder.class);
14981        }
14982    
14983        public static com.google.protobuf.Parser<PrepareRecoveryResponseProto> PARSER =
14984            new com.google.protobuf.AbstractParser<PrepareRecoveryResponseProto>() {
14985          public PrepareRecoveryResponseProto parsePartialFrom(
14986              com.google.protobuf.CodedInputStream input,
14987              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
14988              throws com.google.protobuf.InvalidProtocolBufferException {
14989            return new PrepareRecoveryResponseProto(input, extensionRegistry);
14990          }
14991        };
14992    
14993        @java.lang.Override
14994        public com.google.protobuf.Parser<PrepareRecoveryResponseProto> getParserForType() {
14995          return PARSER;
14996        }
14997    
14998        private int bitField0_;
14999        // optional .hadoop.hdfs.SegmentStateProto segmentState = 1;
15000        public static final int SEGMENTSTATE_FIELD_NUMBER = 1;
15001        private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto segmentState_;
15002        /**
15003         * <code>optional .hadoop.hdfs.SegmentStateProto segmentState = 1;</code>
15004         */
15005        public boolean hasSegmentState() {
15006          return ((bitField0_ & 0x00000001) == 0x00000001);
15007        }
15008        /**
15009         * <code>optional .hadoop.hdfs.SegmentStateProto segmentState = 1;</code>
15010         */
15011        public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto getSegmentState() {
15012          return segmentState_;
15013        }
15014        /**
15015         * <code>optional .hadoop.hdfs.SegmentStateProto segmentState = 1;</code>
15016         */
15017        public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder getSegmentStateOrBuilder() {
15018          return segmentState_;
15019        }
15020    
15021        // optional uint64 acceptedInEpoch = 2;
15022        public static final int ACCEPTEDINEPOCH_FIELD_NUMBER = 2;
15023        private long acceptedInEpoch_;
15024        /**
15025         * <code>optional uint64 acceptedInEpoch = 2;</code>
15026         */
15027        public boolean hasAcceptedInEpoch() {
15028          return ((bitField0_ & 0x00000002) == 0x00000002);
15029        }
15030        /**
15031         * <code>optional uint64 acceptedInEpoch = 2;</code>
15032         */
15033        public long getAcceptedInEpoch() {
15034          return acceptedInEpoch_;
15035        }
15036    
15037        // required uint64 lastWriterEpoch = 3;
15038        public static final int LASTWRITEREPOCH_FIELD_NUMBER = 3;
15039        private long lastWriterEpoch_;
15040        /**
15041         * <code>required uint64 lastWriterEpoch = 3;</code>
15042         */
15043        public boolean hasLastWriterEpoch() {
15044          return ((bitField0_ & 0x00000004) == 0x00000004);
15045        }
15046        /**
15047         * <code>required uint64 lastWriterEpoch = 3;</code>
15048         */
15049        public long getLastWriterEpoch() {
15050          return lastWriterEpoch_;
15051        }
15052    
15053        // optional uint64 lastCommittedTxId = 4;
15054        public static final int LASTCOMMITTEDTXID_FIELD_NUMBER = 4;
15055        private long lastCommittedTxId_;
15056        /**
15057         * <code>optional uint64 lastCommittedTxId = 4;</code>
15058         *
15059         * <pre>
15060         * The highest committed txid that this logger has ever seen.
15061         * This may be higher than the data it actually has, in the case
15062         * that it was lagging before the old writer crashed.
15063         * </pre>
15064         */
15065        public boolean hasLastCommittedTxId() {
15066          return ((bitField0_ & 0x00000008) == 0x00000008);
15067        }
15068        /**
15069         * <code>optional uint64 lastCommittedTxId = 4;</code>
15070         *
15071         * <pre>
15072         * The highest committed txid that this logger has ever seen.
15073         * This may be higher than the data it actually has, in the case
15074         * that it was lagging before the old writer crashed.
15075         * </pre>
15076         */
15077        public long getLastCommittedTxId() {
15078          return lastCommittedTxId_;
15079        }
15080    
15081        private void initFields() {
15082          segmentState_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance();
15083          acceptedInEpoch_ = 0L;
15084          lastWriterEpoch_ = 0L;
15085          lastCommittedTxId_ = 0L;
15086        }
15087        private byte memoizedIsInitialized = -1;
15088        public final boolean isInitialized() {
15089          byte isInitialized = memoizedIsInitialized;
15090          if (isInitialized != -1) return isInitialized == 1;
15091    
15092          if (!hasLastWriterEpoch()) {
15093            memoizedIsInitialized = 0;
15094            return false;
15095          }
15096          if (hasSegmentState()) {
15097            if (!getSegmentState().isInitialized()) {
15098              memoizedIsInitialized = 0;
15099              return false;
15100            }
15101          }
15102          memoizedIsInitialized = 1;
15103          return true;
15104        }
15105    
15106        public void writeTo(com.google.protobuf.CodedOutputStream output)
15107                            throws java.io.IOException {
15108          getSerializedSize();
15109          if (((bitField0_ & 0x00000001) == 0x00000001)) {
15110            output.writeMessage(1, segmentState_);
15111          }
15112          if (((bitField0_ & 0x00000002) == 0x00000002)) {
15113            output.writeUInt64(2, acceptedInEpoch_);
15114          }
15115          if (((bitField0_ & 0x00000004) == 0x00000004)) {
15116            output.writeUInt64(3, lastWriterEpoch_);
15117          }
15118          if (((bitField0_ & 0x00000008) == 0x00000008)) {
15119            output.writeUInt64(4, lastCommittedTxId_);
15120          }
15121          getUnknownFields().writeTo(output);
15122        }
15123    
15124        private int memoizedSerializedSize = -1;
15125        public int getSerializedSize() {
15126          int size = memoizedSerializedSize;
15127          if (size != -1) return size;
15128    
15129          size = 0;
15130          if (((bitField0_ & 0x00000001) == 0x00000001)) {
15131            size += com.google.protobuf.CodedOutputStream
15132              .computeMessageSize(1, segmentState_);
15133          }
15134          if (((bitField0_ & 0x00000002) == 0x00000002)) {
15135            size += com.google.protobuf.CodedOutputStream
15136              .computeUInt64Size(2, acceptedInEpoch_);
15137          }
15138          if (((bitField0_ & 0x00000004) == 0x00000004)) {
15139            size += com.google.protobuf.CodedOutputStream
15140              .computeUInt64Size(3, lastWriterEpoch_);
15141          }
15142          if (((bitField0_ & 0x00000008) == 0x00000008)) {
15143            size += com.google.protobuf.CodedOutputStream
15144              .computeUInt64Size(4, lastCommittedTxId_);
15145          }
15146          size += getUnknownFields().getSerializedSize();
15147          memoizedSerializedSize = size;
15148          return size;
15149        }
15150    
15151        private static final long serialVersionUID = 0L;
15152        @java.lang.Override
15153        protected java.lang.Object writeReplace()
15154            throws java.io.ObjectStreamException {
15155          return super.writeReplace();
15156        }
15157    
15158        @java.lang.Override
15159        public boolean equals(final java.lang.Object obj) {
15160          if (obj == this) {
15161           return true;
15162          }
15163          if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto)) {
15164            return super.equals(obj);
15165          }
15166          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto) obj;
15167    
15168          boolean result = true;
15169          result = result && (hasSegmentState() == other.hasSegmentState());
15170          if (hasSegmentState()) {
15171            result = result && getSegmentState()
15172                .equals(other.getSegmentState());
15173          }
15174          result = result && (hasAcceptedInEpoch() == other.hasAcceptedInEpoch());
15175          if (hasAcceptedInEpoch()) {
15176            result = result && (getAcceptedInEpoch()
15177                == other.getAcceptedInEpoch());
15178          }
15179          result = result && (hasLastWriterEpoch() == other.hasLastWriterEpoch());
15180          if (hasLastWriterEpoch()) {
15181            result = result && (getLastWriterEpoch()
15182                == other.getLastWriterEpoch());
15183          }
15184          result = result && (hasLastCommittedTxId() == other.hasLastCommittedTxId());
15185          if (hasLastCommittedTxId()) {
15186            result = result && (getLastCommittedTxId()
15187                == other.getLastCommittedTxId());
15188          }
15189          result = result &&
15190              getUnknownFields().equals(other.getUnknownFields());
15191          return result;
15192        }
15193    
15194        private int memoizedHashCode = 0;
15195        @java.lang.Override
15196        public int hashCode() {
15197          if (memoizedHashCode != 0) {
15198            return memoizedHashCode;
15199          }
15200          int hash = 41;
15201          hash = (19 * hash) + getDescriptorForType().hashCode();
15202          if (hasSegmentState()) {
15203            hash = (37 * hash) + SEGMENTSTATE_FIELD_NUMBER;
15204            hash = (53 * hash) + getSegmentState().hashCode();
15205          }
15206          if (hasAcceptedInEpoch()) {
15207            hash = (37 * hash) + ACCEPTEDINEPOCH_FIELD_NUMBER;
15208            hash = (53 * hash) + hashLong(getAcceptedInEpoch());
15209          }
15210          if (hasLastWriterEpoch()) {
15211            hash = (37 * hash) + LASTWRITEREPOCH_FIELD_NUMBER;
15212            hash = (53 * hash) + hashLong(getLastWriterEpoch());
15213          }
15214          if (hasLastCommittedTxId()) {
15215            hash = (37 * hash) + LASTCOMMITTEDTXID_FIELD_NUMBER;
15216            hash = (53 * hash) + hashLong(getLastCommittedTxId());
15217          }
15218          hash = (29 * hash) + getUnknownFields().hashCode();
15219          memoizedHashCode = hash;
15220          return hash;
15221        }
15222    
15223        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto parseFrom(
15224            com.google.protobuf.ByteString data)
15225            throws com.google.protobuf.InvalidProtocolBufferException {
15226          return PARSER.parseFrom(data);
15227        }
15228        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto parseFrom(
15229            com.google.protobuf.ByteString data,
15230            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
15231            throws com.google.protobuf.InvalidProtocolBufferException {
15232          return PARSER.parseFrom(data, extensionRegistry);
15233        }
15234        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto parseFrom(byte[] data)
15235            throws com.google.protobuf.InvalidProtocolBufferException {
15236          return PARSER.parseFrom(data);
15237        }
15238        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto parseFrom(
15239            byte[] data,
15240            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
15241            throws com.google.protobuf.InvalidProtocolBufferException {
15242          return PARSER.parseFrom(data, extensionRegistry);
15243        }
15244        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto parseFrom(java.io.InputStream input)
15245            throws java.io.IOException {
15246          return PARSER.parseFrom(input);
15247        }
15248        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto parseFrom(
15249            java.io.InputStream input,
15250            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
15251            throws java.io.IOException {
15252          return PARSER.parseFrom(input, extensionRegistry);
15253        }
15254        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto parseDelimitedFrom(java.io.InputStream input)
15255            throws java.io.IOException {
15256          return PARSER.parseDelimitedFrom(input);
15257        }
15258        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto parseDelimitedFrom(
15259            java.io.InputStream input,
15260            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
15261            throws java.io.IOException {
15262          return PARSER.parseDelimitedFrom(input, extensionRegistry);
15263        }
15264        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto parseFrom(
15265            com.google.protobuf.CodedInputStream input)
15266            throws java.io.IOException {
15267          return PARSER.parseFrom(input);
15268        }
15269        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto parseFrom(
15270            com.google.protobuf.CodedInputStream input,
15271            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
15272            throws java.io.IOException {
15273          return PARSER.parseFrom(input, extensionRegistry);
15274        }
15275    
15276        public static Builder newBuilder() { return Builder.create(); }
15277        public Builder newBuilderForType() { return newBuilder(); }
15278        public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto prototype) {
15279          return newBuilder().mergeFrom(prototype);
15280        }
15281        public Builder toBuilder() { return newBuilder(this); }
15282    
15283        @java.lang.Override
15284        protected Builder newBuilderForType(
15285            com.google.protobuf.GeneratedMessage.BuilderParent parent) {
15286          Builder builder = new Builder(parent);
15287          return builder;
15288        }
15289        /**
15290         * Protobuf type {@code hadoop.hdfs.PrepareRecoveryResponseProto}
15291         */
15292        public static final class Builder extends
15293            com.google.protobuf.GeneratedMessage.Builder<Builder>
15294           implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProtoOrBuilder {
15295          public static final com.google.protobuf.Descriptors.Descriptor
15296              getDescriptor() {
15297            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_PrepareRecoveryResponseProto_descriptor;
15298          }
15299    
15300          protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
15301              internalGetFieldAccessorTable() {
15302            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_PrepareRecoveryResponseProto_fieldAccessorTable
15303                .ensureFieldAccessorsInitialized(
15304                    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto.Builder.class);
15305          }
15306    
15307          // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto.newBuilder()
15308          private Builder() {
15309            maybeForceBuilderInitialization();
15310          }
15311    
15312          private Builder(
15313              com.google.protobuf.GeneratedMessage.BuilderParent parent) {
15314            super(parent);
15315            maybeForceBuilderInitialization();
15316          }
15317          private void maybeForceBuilderInitialization() {
15318            if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
15319              getSegmentStateFieldBuilder();
15320            }
15321          }
15322          private static Builder create() {
15323            return new Builder();
15324          }
15325    
15326          public Builder clear() {
15327            super.clear();
15328            if (segmentStateBuilder_ == null) {
15329              segmentState_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance();
15330            } else {
15331              segmentStateBuilder_.clear();
15332            }
15333            bitField0_ = (bitField0_ & ~0x00000001);
15334            acceptedInEpoch_ = 0L;
15335            bitField0_ = (bitField0_ & ~0x00000002);
15336            lastWriterEpoch_ = 0L;
15337            bitField0_ = (bitField0_ & ~0x00000004);
15338            lastCommittedTxId_ = 0L;
15339            bitField0_ = (bitField0_ & ~0x00000008);
15340            return this;
15341          }
15342    
15343          public Builder clone() {
15344            return create().mergeFrom(buildPartial());
15345          }
15346    
15347          public com.google.protobuf.Descriptors.Descriptor
15348              getDescriptorForType() {
15349            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_PrepareRecoveryResponseProto_descriptor;
15350          }
15351    
15352          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto getDefaultInstanceForType() {
15353            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto.getDefaultInstance();
15354          }
15355    
15356          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto build() {
15357            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto result = buildPartial();
15358            if (!result.isInitialized()) {
15359              throw newUninitializedMessageException(result);
15360            }
15361            return result;
15362          }
15363    
15364          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto buildPartial() {
15365            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto(this);
15366            int from_bitField0_ = bitField0_;
15367            int to_bitField0_ = 0;
15368            if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
15369              to_bitField0_ |= 0x00000001;
15370            }
15371            if (segmentStateBuilder_ == null) {
15372              result.segmentState_ = segmentState_;
15373            } else {
15374              result.segmentState_ = segmentStateBuilder_.build();
15375            }
15376            if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
15377              to_bitField0_ |= 0x00000002;
15378            }
15379            result.acceptedInEpoch_ = acceptedInEpoch_;
15380            if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
15381              to_bitField0_ |= 0x00000004;
15382            }
15383            result.lastWriterEpoch_ = lastWriterEpoch_;
15384            if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
15385              to_bitField0_ |= 0x00000008;
15386            }
15387            result.lastCommittedTxId_ = lastCommittedTxId_;
15388            result.bitField0_ = to_bitField0_;
15389            onBuilt();
15390            return result;
15391          }
15392    
15393          public Builder mergeFrom(com.google.protobuf.Message other) {
15394            if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto) {
15395              return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto)other);
15396            } else {
15397              super.mergeFrom(other);
15398              return this;
15399            }
15400          }
15401    
15402          public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto other) {
15403            if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto.getDefaultInstance()) return this;
15404            if (other.hasSegmentState()) {
15405              mergeSegmentState(other.getSegmentState());
15406            }
15407            if (other.hasAcceptedInEpoch()) {
15408              setAcceptedInEpoch(other.getAcceptedInEpoch());
15409            }
15410            if (other.hasLastWriterEpoch()) {
15411              setLastWriterEpoch(other.getLastWriterEpoch());
15412            }
15413            if (other.hasLastCommittedTxId()) {
15414              setLastCommittedTxId(other.getLastCommittedTxId());
15415            }
15416            this.mergeUnknownFields(other.getUnknownFields());
15417            return this;
15418          }
15419    
15420          public final boolean isInitialized() {
15421            if (!hasLastWriterEpoch()) {
15422              
15423              return false;
15424            }
15425            if (hasSegmentState()) {
15426              if (!getSegmentState().isInitialized()) {
15427                
15428                return false;
15429              }
15430            }
15431            return true;
15432          }
15433    
15434          public Builder mergeFrom(
15435              com.google.protobuf.CodedInputStream input,
15436              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
15437              throws java.io.IOException {
15438            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto parsedMessage = null;
15439            try {
15440              parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
15441            } catch (com.google.protobuf.InvalidProtocolBufferException e) {
15442              parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto) e.getUnfinishedMessage();
15443              throw e;
15444            } finally {
15445              if (parsedMessage != null) {
15446                mergeFrom(parsedMessage);
15447              }
15448            }
15449            return this;
15450          }
15451          private int bitField0_;
15452    
15453          // optional .hadoop.hdfs.SegmentStateProto segmentState = 1;
15454          private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto segmentState_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance();
15455          private com.google.protobuf.SingleFieldBuilder<
15456              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder> segmentStateBuilder_;
15457          /**
15458           * <code>optional .hadoop.hdfs.SegmentStateProto segmentState = 1;</code>
15459           */
15460          public boolean hasSegmentState() {
15461            return ((bitField0_ & 0x00000001) == 0x00000001);
15462          }
15463          /**
15464           * <code>optional .hadoop.hdfs.SegmentStateProto segmentState = 1;</code>
15465           */
15466          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto getSegmentState() {
15467            if (segmentStateBuilder_ == null) {
15468              return segmentState_;
15469            } else {
15470              return segmentStateBuilder_.getMessage();
15471            }
15472          }
15473          /**
15474           * <code>optional .hadoop.hdfs.SegmentStateProto segmentState = 1;</code>
15475           */
15476          public Builder setSegmentState(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto value) {
15477            if (segmentStateBuilder_ == null) {
15478              if (value == null) {
15479                throw new NullPointerException();
15480              }
15481              segmentState_ = value;
15482              onChanged();
15483            } else {
15484              segmentStateBuilder_.setMessage(value);
15485            }
15486            bitField0_ |= 0x00000001;
15487            return this;
15488          }
15489          /**
15490           * <code>optional .hadoop.hdfs.SegmentStateProto segmentState = 1;</code>
15491           */
15492          public Builder setSegmentState(
15493              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder builderForValue) {
15494            if (segmentStateBuilder_ == null) {
15495              segmentState_ = builderForValue.build();
15496              onChanged();
15497            } else {
15498              segmentStateBuilder_.setMessage(builderForValue.build());
15499            }
15500            bitField0_ |= 0x00000001;
15501            return this;
15502          }
15503          /**
15504           * <code>optional .hadoop.hdfs.SegmentStateProto segmentState = 1;</code>
15505           */
15506          public Builder mergeSegmentState(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto value) {
15507            if (segmentStateBuilder_ == null) {
15508              if (((bitField0_ & 0x00000001) == 0x00000001) &&
15509                  segmentState_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance()) {
15510                segmentState_ =
15511                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.newBuilder(segmentState_).mergeFrom(value).buildPartial();
15512              } else {
15513                segmentState_ = value;
15514              }
15515              onChanged();
15516            } else {
15517              segmentStateBuilder_.mergeFrom(value);
15518            }
15519            bitField0_ |= 0x00000001;
15520            return this;
15521          }
15522          /**
15523           * <code>optional .hadoop.hdfs.SegmentStateProto segmentState = 1;</code>
15524           */
15525          public Builder clearSegmentState() {
15526            if (segmentStateBuilder_ == null) {
15527              segmentState_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance();
15528              onChanged();
15529            } else {
15530              segmentStateBuilder_.clear();
15531            }
15532            bitField0_ = (bitField0_ & ~0x00000001);
15533            return this;
15534          }
15535          /**
15536           * <code>optional .hadoop.hdfs.SegmentStateProto segmentState = 1;</code>
15537           */
15538          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder getSegmentStateBuilder() {
15539            bitField0_ |= 0x00000001;
15540            onChanged();
15541            return getSegmentStateFieldBuilder().getBuilder();
15542          }
15543          /**
15544           * <code>optional .hadoop.hdfs.SegmentStateProto segmentState = 1;</code>
15545           */
15546          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder getSegmentStateOrBuilder() {
15547            if (segmentStateBuilder_ != null) {
15548              return segmentStateBuilder_.getMessageOrBuilder();
15549            } else {
15550              return segmentState_;
15551            }
15552          }
15553          /**
15554           * <code>optional .hadoop.hdfs.SegmentStateProto segmentState = 1;</code>
15555           */
15556          private com.google.protobuf.SingleFieldBuilder<
15557              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder> 
15558              getSegmentStateFieldBuilder() {
15559            if (segmentStateBuilder_ == null) {
15560              segmentStateBuilder_ = new com.google.protobuf.SingleFieldBuilder<
15561                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder>(
15562                      segmentState_,
15563                      getParentForChildren(),
15564                      isClean());
15565              segmentState_ = null;
15566            }
15567            return segmentStateBuilder_;
15568          }
15569    
15570          // optional uint64 acceptedInEpoch = 2;
15571          private long acceptedInEpoch_ ;
15572          /**
15573           * <code>optional uint64 acceptedInEpoch = 2;</code>
15574           */
15575          public boolean hasAcceptedInEpoch() {
15576            return ((bitField0_ & 0x00000002) == 0x00000002);
15577          }
15578          /**
15579           * <code>optional uint64 acceptedInEpoch = 2;</code>
15580           */
15581          public long getAcceptedInEpoch() {
15582            return acceptedInEpoch_;
15583          }
15584          /**
15585           * <code>optional uint64 acceptedInEpoch = 2;</code>
15586           */
15587          public Builder setAcceptedInEpoch(long value) {
15588            bitField0_ |= 0x00000002;
15589            acceptedInEpoch_ = value;
15590            onChanged();
15591            return this;
15592          }
15593          /**
15594           * <code>optional uint64 acceptedInEpoch = 2;</code>
15595           */
15596          public Builder clearAcceptedInEpoch() {
15597            bitField0_ = (bitField0_ & ~0x00000002);
15598            acceptedInEpoch_ = 0L;
15599            onChanged();
15600            return this;
15601          }
15602    
15603          // required uint64 lastWriterEpoch = 3;
15604          private long lastWriterEpoch_ ;
15605          /**
15606           * <code>required uint64 lastWriterEpoch = 3;</code>
15607           */
15608          public boolean hasLastWriterEpoch() {
15609            return ((bitField0_ & 0x00000004) == 0x00000004);
15610          }
15611          /**
15612           * <code>required uint64 lastWriterEpoch = 3;</code>
15613           */
15614          public long getLastWriterEpoch() {
15615            return lastWriterEpoch_;
15616          }
15617          /**
15618           * <code>required uint64 lastWriterEpoch = 3;</code>
15619           */
15620          public Builder setLastWriterEpoch(long value) {
15621            bitField0_ |= 0x00000004;
15622            lastWriterEpoch_ = value;
15623            onChanged();
15624            return this;
15625          }
15626          /**
15627           * <code>required uint64 lastWriterEpoch = 3;</code>
15628           */
15629          public Builder clearLastWriterEpoch() {
15630            bitField0_ = (bitField0_ & ~0x00000004);
15631            lastWriterEpoch_ = 0L;
15632            onChanged();
15633            return this;
15634          }
15635    
15636          // optional uint64 lastCommittedTxId = 4;
15637          private long lastCommittedTxId_ ;
15638          /**
15639           * <code>optional uint64 lastCommittedTxId = 4;</code>
15640           *
15641           * <pre>
15642           * The highest committed txid that this logger has ever seen.
15643           * This may be higher than the data it actually has, in the case
15644           * that it was lagging before the old writer crashed.
15645           * </pre>
15646           */
15647          public boolean hasLastCommittedTxId() {
15648            return ((bitField0_ & 0x00000008) == 0x00000008);
15649          }
15650          /**
15651           * <code>optional uint64 lastCommittedTxId = 4;</code>
15652           *
15653           * <pre>
15654           * The highest committed txid that this logger has ever seen.
15655           * This may be higher than the data it actually has, in the case
15656           * that it was lagging before the old writer crashed.
15657           * </pre>
15658           */
15659          public long getLastCommittedTxId() {
15660            return lastCommittedTxId_;
15661          }
15662          /**
15663           * <code>optional uint64 lastCommittedTxId = 4;</code>
15664           *
15665           * <pre>
15666           * The highest committed txid that this logger has ever seen.
15667           * This may be higher than the data it actually has, in the case
15668           * that it was lagging before the old writer crashed.
15669           * </pre>
15670           */
15671          public Builder setLastCommittedTxId(long value) {
15672            bitField0_ |= 0x00000008;
15673            lastCommittedTxId_ = value;
15674            onChanged();
15675            return this;
15676          }
15677          /**
15678           * <code>optional uint64 lastCommittedTxId = 4;</code>
15679           *
15680           * <pre>
15681           * The highest committed txid that this logger has ever seen.
15682           * This may be higher than the data it actually has, in the case
15683           * that it was lagging before the old writer crashed.
15684           * </pre>
15685           */
15686          public Builder clearLastCommittedTxId() {
15687            bitField0_ = (bitField0_ & ~0x00000008);
15688            lastCommittedTxId_ = 0L;
15689            onChanged();
15690            return this;
15691          }
15692    
15693          // @@protoc_insertion_point(builder_scope:hadoop.hdfs.PrepareRecoveryResponseProto)
15694        }
15695    
15696        static {
15697          defaultInstance = new PrepareRecoveryResponseProto(true);
15698          defaultInstance.initFields();
15699        }
15700    
15701        // @@protoc_insertion_point(class_scope:hadoop.hdfs.PrepareRecoveryResponseProto)
15702      }
15703    
15704      public interface AcceptRecoveryRequestProtoOrBuilder
15705          extends com.google.protobuf.MessageOrBuilder {
15706    
15707        // required .hadoop.hdfs.RequestInfoProto reqInfo = 1;
15708        /**
15709         * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
15710         */
15711        boolean hasReqInfo();
15712        /**
15713         * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
15714         */
15715        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo();
15716        /**
15717         * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
15718         */
15719        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder();
15720    
15721        // required .hadoop.hdfs.SegmentStateProto stateToAccept = 2;
15722        /**
15723         * <code>required .hadoop.hdfs.SegmentStateProto stateToAccept = 2;</code>
15724         *
15725         * <pre>
15726         ** Details on the segment to recover 
15727         * </pre>
15728         */
15729        boolean hasStateToAccept();
15730        /**
15731         * <code>required .hadoop.hdfs.SegmentStateProto stateToAccept = 2;</code>
15732         *
15733         * <pre>
15734         ** Details on the segment to recover 
15735         * </pre>
15736         */
15737        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto getStateToAccept();
15738        /**
15739         * <code>required .hadoop.hdfs.SegmentStateProto stateToAccept = 2;</code>
15740         *
15741         * <pre>
15742         ** Details on the segment to recover 
15743         * </pre>
15744         */
15745        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder getStateToAcceptOrBuilder();
15746    
15747        // required string fromURL = 3;
15748        /**
15749         * <code>required string fromURL = 3;</code>
15750         *
15751         * <pre>
15752         ** The URL from which the log may be copied 
15753         * </pre>
15754         */
15755        boolean hasFromURL();
15756        /**
15757         * <code>required string fromURL = 3;</code>
15758         *
15759         * <pre>
15760         ** The URL from which the log may be copied 
15761         * </pre>
15762         */
15763        java.lang.String getFromURL();
15764        /**
15765         * <code>required string fromURL = 3;</code>
15766         *
15767         * <pre>
15768         ** The URL from which the log may be copied 
15769         * </pre>
15770         */
15771        com.google.protobuf.ByteString
15772            getFromURLBytes();
15773      }
15774      /**
15775       * Protobuf type {@code hadoop.hdfs.AcceptRecoveryRequestProto}
15776       *
15777       * <pre>
15778       **
15779       * acceptRecovery()
15780       * </pre>
15781       */
15782      public static final class AcceptRecoveryRequestProto extends
15783          com.google.protobuf.GeneratedMessage
15784          implements AcceptRecoveryRequestProtoOrBuilder {
15785        // Use AcceptRecoveryRequestProto.newBuilder() to construct.
15786        private AcceptRecoveryRequestProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
15787          super(builder);
15788          this.unknownFields = builder.getUnknownFields();
15789        }
15790        private AcceptRecoveryRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
15791    
15792        private static final AcceptRecoveryRequestProto defaultInstance;
15793        public static AcceptRecoveryRequestProto getDefaultInstance() {
15794          return defaultInstance;
15795        }
15796    
15797        public AcceptRecoveryRequestProto getDefaultInstanceForType() {
15798          return defaultInstance;
15799        }
15800    
15801        private final com.google.protobuf.UnknownFieldSet unknownFields;
15802        @java.lang.Override
15803        public final com.google.protobuf.UnknownFieldSet
15804            getUnknownFields() {
15805          return this.unknownFields;
15806        }
15807        private AcceptRecoveryRequestProto(
15808            com.google.protobuf.CodedInputStream input,
15809            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
15810            throws com.google.protobuf.InvalidProtocolBufferException {
15811          initFields();
15812          int mutable_bitField0_ = 0;
15813          com.google.protobuf.UnknownFieldSet.Builder unknownFields =
15814              com.google.protobuf.UnknownFieldSet.newBuilder();
15815          try {
15816            boolean done = false;
15817            while (!done) {
15818              int tag = input.readTag();
15819              switch (tag) {
15820                case 0:
15821                  done = true;
15822                  break;
15823                default: {
15824                  if (!parseUnknownField(input, unknownFields,
15825                                         extensionRegistry, tag)) {
15826                    done = true;
15827                  }
15828                  break;
15829                }
15830                case 10: {
15831                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder subBuilder = null;
15832                  if (((bitField0_ & 0x00000001) == 0x00000001)) {
15833                    subBuilder = reqInfo_.toBuilder();
15834                  }
15835                  reqInfo_ = input.readMessage(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.PARSER, extensionRegistry);
15836                  if (subBuilder != null) {
15837                    subBuilder.mergeFrom(reqInfo_);
15838                    reqInfo_ = subBuilder.buildPartial();
15839                  }
15840                  bitField0_ |= 0x00000001;
15841                  break;
15842                }
15843                case 18: {
15844                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder subBuilder = null;
15845                  if (((bitField0_ & 0x00000002) == 0x00000002)) {
15846                    subBuilder = stateToAccept_.toBuilder();
15847                  }
15848                  stateToAccept_ = input.readMessage(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.PARSER, extensionRegistry);
15849                  if (subBuilder != null) {
15850                    subBuilder.mergeFrom(stateToAccept_);
15851                    stateToAccept_ = subBuilder.buildPartial();
15852                  }
15853                  bitField0_ |= 0x00000002;
15854                  break;
15855                }
15856                case 26: {
15857                  bitField0_ |= 0x00000004;
15858                  fromURL_ = input.readBytes();
15859                  break;
15860                }
15861              }
15862            }
15863          } catch (com.google.protobuf.InvalidProtocolBufferException e) {
15864            throw e.setUnfinishedMessage(this);
15865          } catch (java.io.IOException e) {
15866            throw new com.google.protobuf.InvalidProtocolBufferException(
15867                e.getMessage()).setUnfinishedMessage(this);
15868          } finally {
15869            this.unknownFields = unknownFields.build();
15870            makeExtensionsImmutable();
15871          }
15872        }
15873        public static final com.google.protobuf.Descriptors.Descriptor
15874            getDescriptor() {
15875          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_AcceptRecoveryRequestProto_descriptor;
15876        }
15877    
15878        protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
15879            internalGetFieldAccessorTable() {
15880          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_AcceptRecoveryRequestProto_fieldAccessorTable
15881              .ensureFieldAccessorsInitialized(
15882                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto.Builder.class);
15883        }
15884    
15885        public static com.google.protobuf.Parser<AcceptRecoveryRequestProto> PARSER =
15886            new com.google.protobuf.AbstractParser<AcceptRecoveryRequestProto>() {
15887          public AcceptRecoveryRequestProto parsePartialFrom(
15888              com.google.protobuf.CodedInputStream input,
15889              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
15890              throws com.google.protobuf.InvalidProtocolBufferException {
15891            return new AcceptRecoveryRequestProto(input, extensionRegistry);
15892          }
15893        };
15894    
15895        @java.lang.Override
15896        public com.google.protobuf.Parser<AcceptRecoveryRequestProto> getParserForType() {
15897          return PARSER;
15898        }
15899    
15900        private int bitField0_;
15901        // required .hadoop.hdfs.RequestInfoProto reqInfo = 1;
15902        public static final int REQINFO_FIELD_NUMBER = 1;
15903        private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto reqInfo_;
15904        /**
15905         * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
15906         */
15907        public boolean hasReqInfo() {
15908          return ((bitField0_ & 0x00000001) == 0x00000001);
15909        }
15910        /**
15911         * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
15912         */
15913        public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo() {
15914          return reqInfo_;
15915        }
15916        /**
15917         * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
15918         */
15919        public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder() {
15920          return reqInfo_;
15921        }
15922    
15923        // required .hadoop.hdfs.SegmentStateProto stateToAccept = 2;
15924        public static final int STATETOACCEPT_FIELD_NUMBER = 2;
15925        private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto stateToAccept_;
15926        /**
15927         * <code>required .hadoop.hdfs.SegmentStateProto stateToAccept = 2;</code>
15928         *
15929         * <pre>
15930         ** Details on the segment to recover 
15931         * </pre>
15932         */
15933        public boolean hasStateToAccept() {
15934          return ((bitField0_ & 0x00000002) == 0x00000002);
15935        }
15936        /**
15937         * <code>required .hadoop.hdfs.SegmentStateProto stateToAccept = 2;</code>
15938         *
15939         * <pre>
15940         ** Details on the segment to recover 
15941         * </pre>
15942         */
15943        public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto getStateToAccept() {
15944          return stateToAccept_;
15945        }
15946        /**
15947         * <code>required .hadoop.hdfs.SegmentStateProto stateToAccept = 2;</code>
15948         *
15949         * <pre>
15950         ** Details on the segment to recover 
15951         * </pre>
15952         */
15953        public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder getStateToAcceptOrBuilder() {
15954          return stateToAccept_;
15955        }
15956    
15957        // required string fromURL = 3;
15958        public static final int FROMURL_FIELD_NUMBER = 3;
15959        private java.lang.Object fromURL_;
15960        /**
15961         * <code>required string fromURL = 3;</code>
15962         *
15963         * <pre>
15964         ** The URL from which the log may be copied 
15965         * </pre>
15966         */
15967        public boolean hasFromURL() {
15968          return ((bitField0_ & 0x00000004) == 0x00000004);
15969        }
15970        /**
15971         * <code>required string fromURL = 3;</code>
15972         *
15973         * <pre>
15974         ** The URL from which the log may be copied 
15975         * </pre>
15976         */
15977        public java.lang.String getFromURL() {
15978          java.lang.Object ref = fromURL_;
15979          if (ref instanceof java.lang.String) {
15980            return (java.lang.String) ref;
15981          } else {
15982            com.google.protobuf.ByteString bs = 
15983                (com.google.protobuf.ByteString) ref;
15984            java.lang.String s = bs.toStringUtf8();
15985            if (bs.isValidUtf8()) {
15986              fromURL_ = s;
15987            }
15988            return s;
15989          }
15990        }
15991        /**
15992         * <code>required string fromURL = 3;</code>
15993         *
15994         * <pre>
15995         ** The URL from which the log may be copied 
15996         * </pre>
15997         */
15998        public com.google.protobuf.ByteString
15999            getFromURLBytes() {
16000          java.lang.Object ref = fromURL_;
16001          if (ref instanceof java.lang.String) {
16002            com.google.protobuf.ByteString b = 
16003                com.google.protobuf.ByteString.copyFromUtf8(
16004                    (java.lang.String) ref);
16005            fromURL_ = b;
16006            return b;
16007          } else {
16008            return (com.google.protobuf.ByteString) ref;
16009          }
16010        }
16011    
16012        private void initFields() {
16013          reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
16014          stateToAccept_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance();
16015          fromURL_ = "";
16016        }
16017        private byte memoizedIsInitialized = -1;
16018        public final boolean isInitialized() {
16019          byte isInitialized = memoizedIsInitialized;
16020          if (isInitialized != -1) return isInitialized == 1;
16021    
16022          if (!hasReqInfo()) {
16023            memoizedIsInitialized = 0;
16024            return false;
16025          }
16026          if (!hasStateToAccept()) {
16027            memoizedIsInitialized = 0;
16028            return false;
16029          }
16030          if (!hasFromURL()) {
16031            memoizedIsInitialized = 0;
16032            return false;
16033          }
16034          if (!getReqInfo().isInitialized()) {
16035            memoizedIsInitialized = 0;
16036            return false;
16037          }
16038          if (!getStateToAccept().isInitialized()) {
16039            memoizedIsInitialized = 0;
16040            return false;
16041          }
16042          memoizedIsInitialized = 1;
16043          return true;
16044        }
16045    
16046        public void writeTo(com.google.protobuf.CodedOutputStream output)
16047                            throws java.io.IOException {
16048          getSerializedSize();
16049          if (((bitField0_ & 0x00000001) == 0x00000001)) {
16050            output.writeMessage(1, reqInfo_);
16051          }
16052          if (((bitField0_ & 0x00000002) == 0x00000002)) {
16053            output.writeMessage(2, stateToAccept_);
16054          }
16055          if (((bitField0_ & 0x00000004) == 0x00000004)) {
16056            output.writeBytes(3, getFromURLBytes());
16057          }
16058          getUnknownFields().writeTo(output);
16059        }
16060    
16061        private int memoizedSerializedSize = -1;
16062        public int getSerializedSize() {
16063          int size = memoizedSerializedSize;
16064          if (size != -1) return size;
16065    
16066          size = 0;
16067          if (((bitField0_ & 0x00000001) == 0x00000001)) {
16068            size += com.google.protobuf.CodedOutputStream
16069              .computeMessageSize(1, reqInfo_);
16070          }
16071          if (((bitField0_ & 0x00000002) == 0x00000002)) {
16072            size += com.google.protobuf.CodedOutputStream
16073              .computeMessageSize(2, stateToAccept_);
16074          }
16075          if (((bitField0_ & 0x00000004) == 0x00000004)) {
16076            size += com.google.protobuf.CodedOutputStream
16077              .computeBytesSize(3, getFromURLBytes());
16078          }
16079          size += getUnknownFields().getSerializedSize();
16080          memoizedSerializedSize = size;
16081          return size;
16082        }
16083    
16084        private static final long serialVersionUID = 0L;
16085        @java.lang.Override
16086        protected java.lang.Object writeReplace()
16087            throws java.io.ObjectStreamException {
16088          return super.writeReplace();
16089        }
16090    
16091        @java.lang.Override
16092        public boolean equals(final java.lang.Object obj) {
16093          if (obj == this) {
16094           return true;
16095          }
16096          if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto)) {
16097            return super.equals(obj);
16098          }
16099          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto) obj;
16100    
16101          boolean result = true;
16102          result = result && (hasReqInfo() == other.hasReqInfo());
16103          if (hasReqInfo()) {
16104            result = result && getReqInfo()
16105                .equals(other.getReqInfo());
16106          }
16107          result = result && (hasStateToAccept() == other.hasStateToAccept());
16108          if (hasStateToAccept()) {
16109            result = result && getStateToAccept()
16110                .equals(other.getStateToAccept());
16111          }
16112          result = result && (hasFromURL() == other.hasFromURL());
16113          if (hasFromURL()) {
16114            result = result && getFromURL()
16115                .equals(other.getFromURL());
16116          }
16117          result = result &&
16118              getUnknownFields().equals(other.getUnknownFields());
16119          return result;
16120        }
16121    
16122        private int memoizedHashCode = 0;
16123        @java.lang.Override
16124        public int hashCode() {
16125          if (memoizedHashCode != 0) {
16126            return memoizedHashCode;
16127          }
16128          int hash = 41;
16129          hash = (19 * hash) + getDescriptorForType().hashCode();
16130          if (hasReqInfo()) {
16131            hash = (37 * hash) + REQINFO_FIELD_NUMBER;
16132            hash = (53 * hash) + getReqInfo().hashCode();
16133          }
16134          if (hasStateToAccept()) {
16135            hash = (37 * hash) + STATETOACCEPT_FIELD_NUMBER;
16136            hash = (53 * hash) + getStateToAccept().hashCode();
16137          }
16138          if (hasFromURL()) {
16139            hash = (37 * hash) + FROMURL_FIELD_NUMBER;
16140            hash = (53 * hash) + getFromURL().hashCode();
16141          }
16142          hash = (29 * hash) + getUnknownFields().hashCode();
16143          memoizedHashCode = hash;
16144          return hash;
16145        }
16146    
16147        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto parseFrom(
16148            com.google.protobuf.ByteString data)
16149            throws com.google.protobuf.InvalidProtocolBufferException {
16150          return PARSER.parseFrom(data);
16151        }
16152        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto parseFrom(
16153            com.google.protobuf.ByteString data,
16154            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
16155            throws com.google.protobuf.InvalidProtocolBufferException {
16156          return PARSER.parseFrom(data, extensionRegistry);
16157        }
16158        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto parseFrom(byte[] data)
16159            throws com.google.protobuf.InvalidProtocolBufferException {
16160          return PARSER.parseFrom(data);
16161        }
16162        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto parseFrom(
16163            byte[] data,
16164            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
16165            throws com.google.protobuf.InvalidProtocolBufferException {
16166          return PARSER.parseFrom(data, extensionRegistry);
16167        }
16168        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto parseFrom(java.io.InputStream input)
16169            throws java.io.IOException {
16170          return PARSER.parseFrom(input);
16171        }
16172        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto parseFrom(
16173            java.io.InputStream input,
16174            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
16175            throws java.io.IOException {
16176          return PARSER.parseFrom(input, extensionRegistry);
16177        }
16178        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto parseDelimitedFrom(java.io.InputStream input)
16179            throws java.io.IOException {
16180          return PARSER.parseDelimitedFrom(input);
16181        }
16182        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto parseDelimitedFrom(
16183            java.io.InputStream input,
16184            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
16185            throws java.io.IOException {
16186          return PARSER.parseDelimitedFrom(input, extensionRegistry);
16187        }
16188        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto parseFrom(
16189            com.google.protobuf.CodedInputStream input)
16190            throws java.io.IOException {
16191          return PARSER.parseFrom(input);
16192        }
16193        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto parseFrom(
16194            com.google.protobuf.CodedInputStream input,
16195            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
16196            throws java.io.IOException {
16197          return PARSER.parseFrom(input, extensionRegistry);
16198        }
16199    
16200        public static Builder newBuilder() { return Builder.create(); }
16201        public Builder newBuilderForType() { return newBuilder(); }
16202        public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto prototype) {
16203          return newBuilder().mergeFrom(prototype);
16204        }
16205        public Builder toBuilder() { return newBuilder(this); }
16206    
16207        @java.lang.Override
16208        protected Builder newBuilderForType(
16209            com.google.protobuf.GeneratedMessage.BuilderParent parent) {
16210          Builder builder = new Builder(parent);
16211          return builder;
16212        }
16213        /**
16214         * Protobuf type {@code hadoop.hdfs.AcceptRecoveryRequestProto}
16215         *
16216         * <pre>
16217         **
16218         * acceptRecovery()
16219         * </pre>
16220         */
16221        public static final class Builder extends
16222            com.google.protobuf.GeneratedMessage.Builder<Builder>
16223           implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProtoOrBuilder {
16224          public static final com.google.protobuf.Descriptors.Descriptor
16225              getDescriptor() {
16226            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_AcceptRecoveryRequestProto_descriptor;
16227          }
16228    
16229          protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
16230              internalGetFieldAccessorTable() {
16231            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_AcceptRecoveryRequestProto_fieldAccessorTable
16232                .ensureFieldAccessorsInitialized(
16233                    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto.Builder.class);
16234          }
16235    
16236          // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto.newBuilder()
16237          private Builder() {
16238            maybeForceBuilderInitialization();
16239          }
16240    
16241          private Builder(
16242              com.google.protobuf.GeneratedMessage.BuilderParent parent) {
16243            super(parent);
16244            maybeForceBuilderInitialization();
16245          }
16246          private void maybeForceBuilderInitialization() {
16247            if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
16248              getReqInfoFieldBuilder();
16249              getStateToAcceptFieldBuilder();
16250            }
16251          }
16252          private static Builder create() {
16253            return new Builder();
16254          }
16255    
16256          public Builder clear() {
16257            super.clear();
16258            if (reqInfoBuilder_ == null) {
16259              reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
16260            } else {
16261              reqInfoBuilder_.clear();
16262            }
16263            bitField0_ = (bitField0_ & ~0x00000001);
16264            if (stateToAcceptBuilder_ == null) {
16265              stateToAccept_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance();
16266            } else {
16267              stateToAcceptBuilder_.clear();
16268            }
16269            bitField0_ = (bitField0_ & ~0x00000002);
16270            fromURL_ = "";
16271            bitField0_ = (bitField0_ & ~0x00000004);
16272            return this;
16273          }
16274    
16275          public Builder clone() {
16276            return create().mergeFrom(buildPartial());
16277          }
16278    
16279          public com.google.protobuf.Descriptors.Descriptor
16280              getDescriptorForType() {
16281            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_AcceptRecoveryRequestProto_descriptor;
16282          }
16283    
16284          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto getDefaultInstanceForType() {
16285            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto.getDefaultInstance();
16286          }
16287    
16288          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto build() {
16289            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto result = buildPartial();
16290            if (!result.isInitialized()) {
16291              throw newUninitializedMessageException(result);
16292            }
16293            return result;
16294          }
16295    
16296          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto buildPartial() {
16297            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto(this);
16298            int from_bitField0_ = bitField0_;
16299            int to_bitField0_ = 0;
16300            if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
16301              to_bitField0_ |= 0x00000001;
16302            }
16303            if (reqInfoBuilder_ == null) {
16304              result.reqInfo_ = reqInfo_;
16305            } else {
16306              result.reqInfo_ = reqInfoBuilder_.build();
16307            }
16308            if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
16309              to_bitField0_ |= 0x00000002;
16310            }
16311            if (stateToAcceptBuilder_ == null) {
16312              result.stateToAccept_ = stateToAccept_;
16313            } else {
16314              result.stateToAccept_ = stateToAcceptBuilder_.build();
16315            }
16316            if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
16317              to_bitField0_ |= 0x00000004;
16318            }
16319            result.fromURL_ = fromURL_;
16320            result.bitField0_ = to_bitField0_;
16321            onBuilt();
16322            return result;
16323          }
16324    
16325          public Builder mergeFrom(com.google.protobuf.Message other) {
16326            if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto) {
16327              return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto)other);
16328            } else {
16329              super.mergeFrom(other);
16330              return this;
16331            }
16332          }
16333    
16334          public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto other) {
16335            if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto.getDefaultInstance()) return this;
16336            if (other.hasReqInfo()) {
16337              mergeReqInfo(other.getReqInfo());
16338            }
16339            if (other.hasStateToAccept()) {
16340              mergeStateToAccept(other.getStateToAccept());
16341            }
16342            if (other.hasFromURL()) {
16343              bitField0_ |= 0x00000004;
16344              fromURL_ = other.fromURL_;
16345              onChanged();
16346            }
16347            this.mergeUnknownFields(other.getUnknownFields());
16348            return this;
16349          }
16350    
16351          public final boolean isInitialized() {
16352            if (!hasReqInfo()) {
16353              
16354              return false;
16355            }
16356            if (!hasStateToAccept()) {
16357              
16358              return false;
16359            }
16360            if (!hasFromURL()) {
16361              
16362              return false;
16363            }
16364            if (!getReqInfo().isInitialized()) {
16365              
16366              return false;
16367            }
16368            if (!getStateToAccept().isInitialized()) {
16369              
16370              return false;
16371            }
16372            return true;
16373          }
16374    
16375          public Builder mergeFrom(
16376              com.google.protobuf.CodedInputStream input,
16377              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
16378              throws java.io.IOException {
16379            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto parsedMessage = null;
16380            try {
16381              parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
16382            } catch (com.google.protobuf.InvalidProtocolBufferException e) {
16383              parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto) e.getUnfinishedMessage();
16384              throw e;
16385            } finally {
16386              if (parsedMessage != null) {
16387                mergeFrom(parsedMessage);
16388              }
16389            }
16390            return this;
16391          }
16392          private int bitField0_;
16393    
16394          // required .hadoop.hdfs.RequestInfoProto reqInfo = 1;
16395          private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
16396          private com.google.protobuf.SingleFieldBuilder<
16397              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder> reqInfoBuilder_;
16398          /**
16399           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
16400           */
16401          public boolean hasReqInfo() {
16402            return ((bitField0_ & 0x00000001) == 0x00000001);
16403          }
16404          /**
16405           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
16406           */
16407          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo() {
16408            if (reqInfoBuilder_ == null) {
16409              return reqInfo_;
16410            } else {
16411              return reqInfoBuilder_.getMessage();
16412            }
16413          }
16414          /**
16415           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
16416           */
16417          public Builder setReqInfo(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto value) {
16418            if (reqInfoBuilder_ == null) {
16419              if (value == null) {
16420                throw new NullPointerException();
16421              }
16422              reqInfo_ = value;
16423              onChanged();
16424            } else {
16425              reqInfoBuilder_.setMessage(value);
16426            }
16427            bitField0_ |= 0x00000001;
16428            return this;
16429          }
16430          /**
16431           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
16432           */
16433          public Builder setReqInfo(
16434              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder builderForValue) {
16435            if (reqInfoBuilder_ == null) {
16436              reqInfo_ = builderForValue.build();
16437              onChanged();
16438            } else {
16439              reqInfoBuilder_.setMessage(builderForValue.build());
16440            }
16441            bitField0_ |= 0x00000001;
16442            return this;
16443          }
16444          /**
16445           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
16446           */
16447          public Builder mergeReqInfo(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto value) {
16448            if (reqInfoBuilder_ == null) {
16449              if (((bitField0_ & 0x00000001) == 0x00000001) &&
16450                  reqInfo_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance()) {
16451                reqInfo_ =
16452                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.newBuilder(reqInfo_).mergeFrom(value).buildPartial();
16453              } else {
16454                reqInfo_ = value;
16455              }
16456              onChanged();
16457            } else {
16458              reqInfoBuilder_.mergeFrom(value);
16459            }
16460            bitField0_ |= 0x00000001;
16461            return this;
16462          }
16463          /**
16464           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
16465           */
16466          public Builder clearReqInfo() {
16467            if (reqInfoBuilder_ == null) {
16468              reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
16469              onChanged();
16470            } else {
16471              reqInfoBuilder_.clear();
16472            }
16473            bitField0_ = (bitField0_ & ~0x00000001);
16474            return this;
16475          }
16476          /**
16477           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
16478           */
16479          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder getReqInfoBuilder() {
16480            bitField0_ |= 0x00000001;
16481            onChanged();
16482            return getReqInfoFieldBuilder().getBuilder();
16483          }
16484          /**
16485           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
16486           */
16487          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder() {
16488            if (reqInfoBuilder_ != null) {
16489              return reqInfoBuilder_.getMessageOrBuilder();
16490            } else {
16491              return reqInfo_;
16492            }
16493          }
16494          /**
16495           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
16496           */
16497          private com.google.protobuf.SingleFieldBuilder<
16498              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder> 
16499              getReqInfoFieldBuilder() {
16500            if (reqInfoBuilder_ == null) {
16501              reqInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder<
16502                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder>(
16503                      reqInfo_,
16504                      getParentForChildren(),
16505                      isClean());
16506              reqInfo_ = null;
16507            }
16508            return reqInfoBuilder_;
16509          }
16510    
16511          // required .hadoop.hdfs.SegmentStateProto stateToAccept = 2;
16512          private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto stateToAccept_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance();
16513          private com.google.protobuf.SingleFieldBuilder<
16514              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder> stateToAcceptBuilder_;
16515          /**
16516           * <code>required .hadoop.hdfs.SegmentStateProto stateToAccept = 2;</code>
16517           *
16518           * <pre>
16519           ** Details on the segment to recover 
16520           * </pre>
16521           */
16522          public boolean hasStateToAccept() {
16523            return ((bitField0_ & 0x00000002) == 0x00000002);
16524          }
16525          /**
16526           * <code>required .hadoop.hdfs.SegmentStateProto stateToAccept = 2;</code>
16527           *
16528           * <pre>
16529           ** Details on the segment to recover 
16530           * </pre>
16531           */
16532          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto getStateToAccept() {
16533            if (stateToAcceptBuilder_ == null) {
16534              return stateToAccept_;
16535            } else {
16536              return stateToAcceptBuilder_.getMessage();
16537            }
16538          }
16539          /**
16540           * <code>required .hadoop.hdfs.SegmentStateProto stateToAccept = 2;</code>
16541           *
16542           * <pre>
16543           ** Details on the segment to recover 
16544           * </pre>
16545           */
16546          public Builder setStateToAccept(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto value) {
16547            if (stateToAcceptBuilder_ == null) {
16548              if (value == null) {
16549                throw new NullPointerException();
16550              }
16551              stateToAccept_ = value;
16552              onChanged();
16553            } else {
16554              stateToAcceptBuilder_.setMessage(value);
16555            }
16556            bitField0_ |= 0x00000002;
16557            return this;
16558          }
16559          /**
16560           * <code>required .hadoop.hdfs.SegmentStateProto stateToAccept = 2;</code>
16561           *
16562           * <pre>
16563           ** Details on the segment to recover 
16564           * </pre>
16565           */
16566          public Builder setStateToAccept(
16567              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder builderForValue) {
16568            if (stateToAcceptBuilder_ == null) {
16569              stateToAccept_ = builderForValue.build();
16570              onChanged();
16571            } else {
16572              stateToAcceptBuilder_.setMessage(builderForValue.build());
16573            }
16574            bitField0_ |= 0x00000002;
16575            return this;
16576          }
16577          /**
16578           * <code>required .hadoop.hdfs.SegmentStateProto stateToAccept = 2;</code>
16579           *
16580           * <pre>
16581           ** Details on the segment to recover 
16582           * </pre>
16583           */
16584          public Builder mergeStateToAccept(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto value) {
16585            if (stateToAcceptBuilder_ == null) {
16586              if (((bitField0_ & 0x00000002) == 0x00000002) &&
16587                  stateToAccept_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance()) {
16588                stateToAccept_ =
16589                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.newBuilder(stateToAccept_).mergeFrom(value).buildPartial();
16590              } else {
16591                stateToAccept_ = value;
16592              }
16593              onChanged();
16594            } else {
16595              stateToAcceptBuilder_.mergeFrom(value);
16596            }
16597            bitField0_ |= 0x00000002;
16598            return this;
16599          }
16600          /**
16601           * <code>required .hadoop.hdfs.SegmentStateProto stateToAccept = 2;</code>
16602           *
16603           * <pre>
16604           ** Details on the segment to recover 
16605           * </pre>
16606           */
16607          public Builder clearStateToAccept() {
16608            if (stateToAcceptBuilder_ == null) {
16609              stateToAccept_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance();
16610              onChanged();
16611            } else {
16612              stateToAcceptBuilder_.clear();
16613            }
16614            bitField0_ = (bitField0_ & ~0x00000002);
16615            return this;
16616          }
16617          /**
16618           * <code>required .hadoop.hdfs.SegmentStateProto stateToAccept = 2;</code>
16619           *
16620           * <pre>
16621           ** Details on the segment to recover 
16622           * </pre>
16623           */
16624          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder getStateToAcceptBuilder() {
16625            bitField0_ |= 0x00000002;
16626            onChanged();
16627            return getStateToAcceptFieldBuilder().getBuilder();
16628          }
16629          /**
16630           * <code>required .hadoop.hdfs.SegmentStateProto stateToAccept = 2;</code>
16631           *
16632           * <pre>
16633           ** Details on the segment to recover 
16634           * </pre>
16635           */
16636          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder getStateToAcceptOrBuilder() {
16637            if (stateToAcceptBuilder_ != null) {
16638              return stateToAcceptBuilder_.getMessageOrBuilder();
16639            } else {
16640              return stateToAccept_;
16641            }
16642          }
16643          /**
16644           * <code>required .hadoop.hdfs.SegmentStateProto stateToAccept = 2;</code>
16645           *
16646           * <pre>
16647           ** Details on the segment to recover 
16648           * </pre>
16649           */
16650          private com.google.protobuf.SingleFieldBuilder<
16651              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder> 
16652              getStateToAcceptFieldBuilder() {
16653            if (stateToAcceptBuilder_ == null) {
16654              stateToAcceptBuilder_ = new com.google.protobuf.SingleFieldBuilder<
16655                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder>(
16656                      stateToAccept_,
16657                      getParentForChildren(),
16658                      isClean());
16659              stateToAccept_ = null;
16660            }
16661            return stateToAcceptBuilder_;
16662          }
16663    
16664          // required string fromURL = 3;
16665          private java.lang.Object fromURL_ = "";
16666          /**
16667           * <code>required string fromURL = 3;</code>
16668           *
16669           * <pre>
16670           ** The URL from which the log may be copied 
16671           * </pre>
16672           */
16673          public boolean hasFromURL() {
16674            return ((bitField0_ & 0x00000004) == 0x00000004);
16675          }
16676          /**
16677           * <code>required string fromURL = 3;</code>
16678           *
16679           * <pre>
16680           ** The URL from which the log may be copied 
16681           * </pre>
16682           */
16683          public java.lang.String getFromURL() {
16684            java.lang.Object ref = fromURL_;
16685            if (!(ref instanceof java.lang.String)) {
16686              java.lang.String s = ((com.google.protobuf.ByteString) ref)
16687                  .toStringUtf8();
16688              fromURL_ = s;
16689              return s;
16690            } else {
16691              return (java.lang.String) ref;
16692            }
16693          }
16694          /**
16695           * <code>required string fromURL = 3;</code>
16696           *
16697           * <pre>
16698           ** The URL from which the log may be copied 
16699           * </pre>
16700           */
16701          public com.google.protobuf.ByteString
16702              getFromURLBytes() {
16703            java.lang.Object ref = fromURL_;
16704            if (ref instanceof String) {
16705              com.google.protobuf.ByteString b = 
16706                  com.google.protobuf.ByteString.copyFromUtf8(
16707                      (java.lang.String) ref);
16708              fromURL_ = b;
16709              return b;
16710            } else {
16711              return (com.google.protobuf.ByteString) ref;
16712            }
16713          }
16714          /**
16715           * <code>required string fromURL = 3;</code>
16716           *
16717           * <pre>
16718           ** The URL from which the log may be copied 
16719           * </pre>
16720           */
16721          public Builder setFromURL(
16722              java.lang.String value) {
16723            if (value == null) {
16724        throw new NullPointerException();
16725      }
16726      bitField0_ |= 0x00000004;
16727            fromURL_ = value;
16728            onChanged();
16729            return this;
16730          }
16731          /**
16732           * <code>required string fromURL = 3;</code>
16733           *
16734           * <pre>
16735           ** The URL from which the log may be copied 
16736           * </pre>
16737           */
16738          public Builder clearFromURL() {
16739            bitField0_ = (bitField0_ & ~0x00000004);
16740            fromURL_ = getDefaultInstance().getFromURL();
16741            onChanged();
16742            return this;
16743          }
16744          /**
16745           * <code>required string fromURL = 3;</code>
16746           *
16747           * <pre>
16748           ** The URL from which the log may be copied 
16749           * </pre>
16750           */
16751          public Builder setFromURLBytes(
16752              com.google.protobuf.ByteString value) {
16753            if (value == null) {
16754        throw new NullPointerException();
16755      }
16756      bitField0_ |= 0x00000004;
16757            fromURL_ = value;
16758            onChanged();
16759            return this;
16760          }
16761    
16762          // @@protoc_insertion_point(builder_scope:hadoop.hdfs.AcceptRecoveryRequestProto)
16763        }
16764    
16765        static {
16766          defaultInstance = new AcceptRecoveryRequestProto(true);
16767          defaultInstance.initFields();
16768        }
16769    
16770        // @@protoc_insertion_point(class_scope:hadoop.hdfs.AcceptRecoveryRequestProto)
16771      }
16772    
16773      public interface AcceptRecoveryResponseProtoOrBuilder
16774          extends com.google.protobuf.MessageOrBuilder {
16775      }
16776      /**
16777       * Protobuf type {@code hadoop.hdfs.AcceptRecoveryResponseProto}
16778       */
16779      public static final class AcceptRecoveryResponseProto extends
16780          com.google.protobuf.GeneratedMessage
16781          implements AcceptRecoveryResponseProtoOrBuilder {
16782        // Use AcceptRecoveryResponseProto.newBuilder() to construct.
16783        private AcceptRecoveryResponseProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
16784          super(builder);
16785          this.unknownFields = builder.getUnknownFields();
16786        }
16787        private AcceptRecoveryResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
16788    
16789        private static final AcceptRecoveryResponseProto defaultInstance;
16790        public static AcceptRecoveryResponseProto getDefaultInstance() {
16791          return defaultInstance;
16792        }
16793    
16794        public AcceptRecoveryResponseProto getDefaultInstanceForType() {
16795          return defaultInstance;
16796        }
16797    
16798        private final com.google.protobuf.UnknownFieldSet unknownFields;
16799        @java.lang.Override
16800        public final com.google.protobuf.UnknownFieldSet
16801            getUnknownFields() {
16802          return this.unknownFields;
16803        }
16804        private AcceptRecoveryResponseProto(
16805            com.google.protobuf.CodedInputStream input,
16806            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
16807            throws com.google.protobuf.InvalidProtocolBufferException {
16808          initFields();
16809          com.google.protobuf.UnknownFieldSet.Builder unknownFields =
16810              com.google.protobuf.UnknownFieldSet.newBuilder();
16811          try {
16812            boolean done = false;
16813            while (!done) {
16814              int tag = input.readTag();
16815              switch (tag) {
16816                case 0:
16817                  done = true;
16818                  break;
16819                default: {
16820                  if (!parseUnknownField(input, unknownFields,
16821                                         extensionRegistry, tag)) {
16822                    done = true;
16823                  }
16824                  break;
16825                }
16826              }
16827            }
16828          } catch (com.google.protobuf.InvalidProtocolBufferException e) {
16829            throw e.setUnfinishedMessage(this);
16830          } catch (java.io.IOException e) {
16831            throw new com.google.protobuf.InvalidProtocolBufferException(
16832                e.getMessage()).setUnfinishedMessage(this);
16833          } finally {
16834            this.unknownFields = unknownFields.build();
16835            makeExtensionsImmutable();
16836          }
16837        }
16838        public static final com.google.protobuf.Descriptors.Descriptor
16839            getDescriptor() {
16840          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_AcceptRecoveryResponseProto_descriptor;
16841        }
16842    
16843        protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
16844            internalGetFieldAccessorTable() {
16845          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_AcceptRecoveryResponseProto_fieldAccessorTable
16846              .ensureFieldAccessorsInitialized(
16847                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto.Builder.class);
16848        }
16849    
16850        public static com.google.protobuf.Parser<AcceptRecoveryResponseProto> PARSER =
16851            new com.google.protobuf.AbstractParser<AcceptRecoveryResponseProto>() {
16852          public AcceptRecoveryResponseProto parsePartialFrom(
16853              com.google.protobuf.CodedInputStream input,
16854              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
16855              throws com.google.protobuf.InvalidProtocolBufferException {
16856            return new AcceptRecoveryResponseProto(input, extensionRegistry);
16857          }
16858        };
16859    
16860        @java.lang.Override
16861        public com.google.protobuf.Parser<AcceptRecoveryResponseProto> getParserForType() {
16862          return PARSER;
16863        }
16864    
16865        private void initFields() {
16866        }
16867        private byte memoizedIsInitialized = -1;
16868        public final boolean isInitialized() {
16869          byte isInitialized = memoizedIsInitialized;
16870          if (isInitialized != -1) return isInitialized == 1;
16871    
16872          memoizedIsInitialized = 1;
16873          return true;
16874        }
16875    
16876        public void writeTo(com.google.protobuf.CodedOutputStream output)
16877                            throws java.io.IOException {
16878          getSerializedSize();
16879          getUnknownFields().writeTo(output);
16880        }
16881    
16882        private int memoizedSerializedSize = -1;
16883        public int getSerializedSize() {
16884          int size = memoizedSerializedSize;
16885          if (size != -1) return size;
16886    
16887          size = 0;
16888          size += getUnknownFields().getSerializedSize();
16889          memoizedSerializedSize = size;
16890          return size;
16891        }
16892    
16893        private static final long serialVersionUID = 0L;
16894        @java.lang.Override
16895        protected java.lang.Object writeReplace()
16896            throws java.io.ObjectStreamException {
16897          return super.writeReplace();
16898        }
16899    
16900        @java.lang.Override
16901        public boolean equals(final java.lang.Object obj) {
16902          if (obj == this) {
16903           return true;
16904          }
16905          if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto)) {
16906            return super.equals(obj);
16907          }
16908          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto) obj;
16909    
16910          boolean result = true;
16911          result = result &&
16912              getUnknownFields().equals(other.getUnknownFields());
16913          return result;
16914        }
16915    
16916        private int memoizedHashCode = 0;
16917        @java.lang.Override
16918        public int hashCode() {
16919          if (memoizedHashCode != 0) {
16920            return memoizedHashCode;
16921          }
16922          int hash = 41;
16923          hash = (19 * hash) + getDescriptorForType().hashCode();
16924          hash = (29 * hash) + getUnknownFields().hashCode();
16925          memoizedHashCode = hash;
16926          return hash;
16927        }
16928    
16929        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto parseFrom(
16930            com.google.protobuf.ByteString data)
16931            throws com.google.protobuf.InvalidProtocolBufferException {
16932          return PARSER.parseFrom(data);
16933        }
16934        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto parseFrom(
16935            com.google.protobuf.ByteString data,
16936            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
16937            throws com.google.protobuf.InvalidProtocolBufferException {
16938          return PARSER.parseFrom(data, extensionRegistry);
16939        }
16940        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto parseFrom(byte[] data)
16941            throws com.google.protobuf.InvalidProtocolBufferException {
16942          return PARSER.parseFrom(data);
16943        }
16944        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto parseFrom(
16945            byte[] data,
16946            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
16947            throws com.google.protobuf.InvalidProtocolBufferException {
16948          return PARSER.parseFrom(data, extensionRegistry);
16949        }
16950        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto parseFrom(java.io.InputStream input)
16951            throws java.io.IOException {
16952          return PARSER.parseFrom(input);
16953        }
16954        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto parseFrom(
16955            java.io.InputStream input,
16956            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
16957            throws java.io.IOException {
16958          return PARSER.parseFrom(input, extensionRegistry);
16959        }
16960        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto parseDelimitedFrom(java.io.InputStream input)
16961            throws java.io.IOException {
16962          return PARSER.parseDelimitedFrom(input);
16963        }
16964        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto parseDelimitedFrom(
16965            java.io.InputStream input,
16966            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
16967            throws java.io.IOException {
16968          return PARSER.parseDelimitedFrom(input, extensionRegistry);
16969        }
16970        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto parseFrom(
16971            com.google.protobuf.CodedInputStream input)
16972            throws java.io.IOException {
16973          return PARSER.parseFrom(input);
16974        }
16975        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto parseFrom(
16976            com.google.protobuf.CodedInputStream input,
16977            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
16978            throws java.io.IOException {
16979          return PARSER.parseFrom(input, extensionRegistry);
16980        }
16981    
16982        public static Builder newBuilder() { return Builder.create(); }
16983        public Builder newBuilderForType() { return newBuilder(); }
16984        public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto prototype) {
16985          return newBuilder().mergeFrom(prototype);
16986        }
16987        public Builder toBuilder() { return newBuilder(this); }
16988    
16989        @java.lang.Override
16990        protected Builder newBuilderForType(
16991            com.google.protobuf.GeneratedMessage.BuilderParent parent) {
16992          Builder builder = new Builder(parent);
16993          return builder;
16994        }
16995        /**
16996         * Protobuf type {@code hadoop.hdfs.AcceptRecoveryResponseProto}
16997         */
16998        public static final class Builder extends
16999            com.google.protobuf.GeneratedMessage.Builder<Builder>
17000           implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProtoOrBuilder {
17001          public static final com.google.protobuf.Descriptors.Descriptor
17002              getDescriptor() {
17003            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_AcceptRecoveryResponseProto_descriptor;
17004          }
17005    
17006          protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
17007              internalGetFieldAccessorTable() {
17008            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_AcceptRecoveryResponseProto_fieldAccessorTable
17009                .ensureFieldAccessorsInitialized(
17010                    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto.Builder.class);
17011          }
17012    
17013          // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto.newBuilder()
17014          private Builder() {
17015            maybeForceBuilderInitialization();
17016          }
17017    
17018          private Builder(
17019              com.google.protobuf.GeneratedMessage.BuilderParent parent) {
17020            super(parent);
17021            maybeForceBuilderInitialization();
17022          }
17023          private void maybeForceBuilderInitialization() {
17024            if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
17025            }
17026          }
17027          private static Builder create() {
17028            return new Builder();
17029          }
17030    
17031          public Builder clear() {
17032            super.clear();
17033            return this;
17034          }
17035    
17036          public Builder clone() {
17037            return create().mergeFrom(buildPartial());
17038          }
17039    
17040          public com.google.protobuf.Descriptors.Descriptor
17041              getDescriptorForType() {
17042            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_AcceptRecoveryResponseProto_descriptor;
17043          }
17044    
17045          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto getDefaultInstanceForType() {
17046            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto.getDefaultInstance();
17047          }
17048    
17049          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto build() {
17050            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto result = buildPartial();
17051            if (!result.isInitialized()) {
17052              throw newUninitializedMessageException(result);
17053            }
17054            return result;
17055          }
17056    
17057          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto buildPartial() {
17058            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto(this);
17059            onBuilt();
17060            return result;
17061          }
17062    
17063          public Builder mergeFrom(com.google.protobuf.Message other) {
17064            if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto) {
17065              return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto)other);
17066            } else {
17067              super.mergeFrom(other);
17068              return this;
17069            }
17070          }
17071    
17072          public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto other) {
17073            if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto.getDefaultInstance()) return this;
17074            this.mergeUnknownFields(other.getUnknownFields());
17075            return this;
17076          }
17077    
17078          public final boolean isInitialized() {
17079            return true;
17080          }
17081    
17082          public Builder mergeFrom(
17083              com.google.protobuf.CodedInputStream input,
17084              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
17085              throws java.io.IOException {
17086            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto parsedMessage = null;
17087            try {
17088              parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
17089            } catch (com.google.protobuf.InvalidProtocolBufferException e) {
17090              parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto) e.getUnfinishedMessage();
17091              throw e;
17092            } finally {
17093              if (parsedMessage != null) {
17094                mergeFrom(parsedMessage);
17095              }
17096            }
17097            return this;
17098          }
17099    
17100          // @@protoc_insertion_point(builder_scope:hadoop.hdfs.AcceptRecoveryResponseProto)
17101        }
17102    
17103        static {
17104          defaultInstance = new AcceptRecoveryResponseProto(true);
17105          defaultInstance.initFields();
17106        }
17107    
17108        // @@protoc_insertion_point(class_scope:hadoop.hdfs.AcceptRecoveryResponseProto)
17109      }
17110    
17111      /**
17112       * Protobuf service {@code hadoop.hdfs.QJournalProtocolService}
17113       *
17114       * <pre>
17115       **
17116       * Protocol used to journal edits to a JournalNode.
17117       * See the request and response for details of rpc call.
17118       * </pre>
17119       */
17120      public static abstract class QJournalProtocolService
17121          implements com.google.protobuf.Service {
17122        protected QJournalProtocolService() {}
17123    
17124        public interface Interface {
17125          /**
17126           * <code>rpc isFormatted(.hadoop.hdfs.IsFormattedRequestProto) returns (.hadoop.hdfs.IsFormattedResponseProto);</code>
17127           */
17128          public abstract void isFormatted(
17129              com.google.protobuf.RpcController controller,
17130              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto request,
17131              com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto> done);
17132    
17133          /**
17134           * <code>rpc getJournalState(.hadoop.hdfs.GetJournalStateRequestProto) returns (.hadoop.hdfs.GetJournalStateResponseProto);</code>
17135           */
17136          public abstract void getJournalState(
17137              com.google.protobuf.RpcController controller,
17138              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto request,
17139              com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto> done);
17140    
17141          /**
17142           * <code>rpc newEpoch(.hadoop.hdfs.NewEpochRequestProto) returns (.hadoop.hdfs.NewEpochResponseProto);</code>
17143           */
17144          public abstract void newEpoch(
17145              com.google.protobuf.RpcController controller,
17146              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto request,
17147              com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto> done);
17148    
17149          /**
17150           * <code>rpc format(.hadoop.hdfs.FormatRequestProto) returns (.hadoop.hdfs.FormatResponseProto);</code>
17151           */
17152          public abstract void format(
17153              com.google.protobuf.RpcController controller,
17154              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto request,
17155              com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto> done);
17156    
17157          /**
17158           * <code>rpc journal(.hadoop.hdfs.JournalRequestProto) returns (.hadoop.hdfs.JournalResponseProto);</code>
17159           */
17160          public abstract void journal(
17161              com.google.protobuf.RpcController controller,
17162              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto request,
17163              com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto> done);
17164    
17165          /**
17166           * <code>rpc heartbeat(.hadoop.hdfs.HeartbeatRequestProto) returns (.hadoop.hdfs.HeartbeatResponseProto);</code>
17167           */
17168          public abstract void heartbeat(
17169              com.google.protobuf.RpcController controller,
17170              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto request,
17171              com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto> done);
17172    
17173          /**
17174           * <code>rpc startLogSegment(.hadoop.hdfs.StartLogSegmentRequestProto) returns (.hadoop.hdfs.StartLogSegmentResponseProto);</code>
17175           */
17176          public abstract void startLogSegment(
17177              com.google.protobuf.RpcController controller,
17178              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto request,
17179              com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto> done);
17180    
17181          /**
17182           * <code>rpc finalizeLogSegment(.hadoop.hdfs.FinalizeLogSegmentRequestProto) returns (.hadoop.hdfs.FinalizeLogSegmentResponseProto);</code>
17183           */
17184          public abstract void finalizeLogSegment(
17185              com.google.protobuf.RpcController controller,
17186              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto request,
17187              com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto> done);
17188    
17189          /**
17190           * <code>rpc purgeLogs(.hadoop.hdfs.PurgeLogsRequestProto) returns (.hadoop.hdfs.PurgeLogsResponseProto);</code>
17191           */
17192          public abstract void purgeLogs(
17193              com.google.protobuf.RpcController controller,
17194              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto request,
17195              com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto> done);
17196    
17197          /**
17198           * <code>rpc getEditLogManifest(.hadoop.hdfs.GetEditLogManifestRequestProto) returns (.hadoop.hdfs.GetEditLogManifestResponseProto);</code>
17199           */
17200          public abstract void getEditLogManifest(
17201              com.google.protobuf.RpcController controller,
17202              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto request,
17203              com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto> done);
17204    
17205          /**
17206           * <code>rpc prepareRecovery(.hadoop.hdfs.PrepareRecoveryRequestProto) returns (.hadoop.hdfs.PrepareRecoveryResponseProto);</code>
17207           */
17208          public abstract void prepareRecovery(
17209              com.google.protobuf.RpcController controller,
17210              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto request,
17211              com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto> done);
17212    
17213          /**
17214           * <code>rpc acceptRecovery(.hadoop.hdfs.AcceptRecoveryRequestProto) returns (.hadoop.hdfs.AcceptRecoveryResponseProto);</code>
17215           */
17216          public abstract void acceptRecovery(
17217              com.google.protobuf.RpcController controller,
17218              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto request,
17219              com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto> done);
17220    
17221        }
17222    
17223        public static com.google.protobuf.Service newReflectiveService(
17224            final Interface impl) {
17225          return new QJournalProtocolService() {
17226            @java.lang.Override
17227            public  void isFormatted(
17228                com.google.protobuf.RpcController controller,
17229                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto request,
17230                com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto> done) {
17231              impl.isFormatted(controller, request, done);
17232            }
17233    
17234            @java.lang.Override
17235            public  void getJournalState(
17236                com.google.protobuf.RpcController controller,
17237                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto request,
17238                com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto> done) {
17239              impl.getJournalState(controller, request, done);
17240            }
17241    
17242            @java.lang.Override
17243            public  void newEpoch(
17244                com.google.protobuf.RpcController controller,
17245                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto request,
17246                com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto> done) {
17247              impl.newEpoch(controller, request, done);
17248            }
17249    
17250            @java.lang.Override
17251            public  void format(
17252                com.google.protobuf.RpcController controller,
17253                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto request,
17254                com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto> done) {
17255              impl.format(controller, request, done);
17256            }
17257    
17258            @java.lang.Override
17259            public  void journal(
17260                com.google.protobuf.RpcController controller,
17261                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto request,
17262                com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto> done) {
17263              impl.journal(controller, request, done);
17264            }
17265    
17266            @java.lang.Override
17267            public  void heartbeat(
17268                com.google.protobuf.RpcController controller,
17269                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto request,
17270                com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto> done) {
17271              impl.heartbeat(controller, request, done);
17272            }
17273    
17274            @java.lang.Override
17275            public  void startLogSegment(
17276                com.google.protobuf.RpcController controller,
17277                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto request,
17278                com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto> done) {
17279              impl.startLogSegment(controller, request, done);
17280            }
17281    
17282            @java.lang.Override
17283            public  void finalizeLogSegment(
17284                com.google.protobuf.RpcController controller,
17285                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto request,
17286                com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto> done) {
17287              impl.finalizeLogSegment(controller, request, done);
17288            }
17289    
17290            @java.lang.Override
17291            public  void purgeLogs(
17292                com.google.protobuf.RpcController controller,
17293                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto request,
17294                com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto> done) {
17295              impl.purgeLogs(controller, request, done);
17296            }
17297    
17298            @java.lang.Override
17299            public  void getEditLogManifest(
17300                com.google.protobuf.RpcController controller,
17301                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto request,
17302                com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto> done) {
17303              impl.getEditLogManifest(controller, request, done);
17304            }
17305    
17306            @java.lang.Override
17307            public  void prepareRecovery(
17308                com.google.protobuf.RpcController controller,
17309                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto request,
17310                com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto> done) {
17311              impl.prepareRecovery(controller, request, done);
17312            }
17313    
17314            @java.lang.Override
17315            public  void acceptRecovery(
17316                com.google.protobuf.RpcController controller,
17317                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto request,
17318                com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto> done) {
17319              impl.acceptRecovery(controller, request, done);
17320            }
17321    
17322          };
17323        }
17324    
17325        public static com.google.protobuf.BlockingService
17326            newReflectiveBlockingService(final BlockingInterface impl) {
17327          return new com.google.protobuf.BlockingService() {
17328            public final com.google.protobuf.Descriptors.ServiceDescriptor
17329                getDescriptorForType() {
17330              return getDescriptor();
17331            }
17332    
17333            public final com.google.protobuf.Message callBlockingMethod(
17334                com.google.protobuf.Descriptors.MethodDescriptor method,
17335                com.google.protobuf.RpcController controller,
17336                com.google.protobuf.Message request)
17337                throws com.google.protobuf.ServiceException {
17338              if (method.getService() != getDescriptor()) {
17339                throw new java.lang.IllegalArgumentException(
17340                  "Service.callBlockingMethod() given method descriptor for " +
17341                  "wrong service type.");
17342              }
17343              switch(method.getIndex()) {
17344                case 0:
17345                  return impl.isFormatted(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto)request);
17346                case 1:
17347                  return impl.getJournalState(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto)request);
17348                case 2:
17349                  return impl.newEpoch(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto)request);
17350                case 3:
17351                  return impl.format(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto)request);
17352                case 4:
17353                  return impl.journal(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto)request);
17354                case 5:
17355                  return impl.heartbeat(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto)request);
17356                case 6:
17357                  return impl.startLogSegment(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto)request);
17358                case 7:
17359                  return impl.finalizeLogSegment(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto)request);
17360                case 8:
17361                  return impl.purgeLogs(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto)request);
17362                case 9:
17363                  return impl.getEditLogManifest(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto)request);
17364                case 10:
17365                  return impl.prepareRecovery(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto)request);
17366                case 11:
17367                  return impl.acceptRecovery(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto)request);
17368                default:
17369                  throw new java.lang.AssertionError("Can't get here.");
17370              }
17371            }
17372    
17373            public final com.google.protobuf.Message
17374                getRequestPrototype(
17375                com.google.protobuf.Descriptors.MethodDescriptor method) {
17376              if (method.getService() != getDescriptor()) {
17377                throw new java.lang.IllegalArgumentException(
17378                  "Service.getRequestPrototype() given method " +
17379                  "descriptor for wrong service type.");
17380              }
17381              switch(method.getIndex()) {
17382                case 0:
17383                  return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto.getDefaultInstance();
17384                case 1:
17385                  return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto.getDefaultInstance();
17386                case 2:
17387                  return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto.getDefaultInstance();
17388                case 3:
17389                  return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto.getDefaultInstance();
17390                case 4:
17391                  return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto.getDefaultInstance();
17392                case 5:
17393                  return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto.getDefaultInstance();
17394                case 6:
17395                  return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto.getDefaultInstance();
17396                case 7:
17397                  return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto.getDefaultInstance();
17398                case 8:
17399                  return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto.getDefaultInstance();
17400                case 9:
17401                  return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto.getDefaultInstance();
17402                case 10:
17403                  return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto.getDefaultInstance();
17404                case 11:
17405                  return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto.getDefaultInstance();
17406                default:
17407                  throw new java.lang.AssertionError("Can't get here.");
17408              }
17409            }
17410    
17411            public final com.google.protobuf.Message
17412                getResponsePrototype(
17413                com.google.protobuf.Descriptors.MethodDescriptor method) {
17414              if (method.getService() != getDescriptor()) {
17415                throw new java.lang.IllegalArgumentException(
17416                  "Service.getResponsePrototype() given method " +
17417                  "descriptor for wrong service type.");
17418              }
17419              switch(method.getIndex()) {
17420                case 0:
17421                  return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto.getDefaultInstance();
17422                case 1:
17423                  return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto.getDefaultInstance();
17424                case 2:
17425                  return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto.getDefaultInstance();
17426                case 3:
17427                  return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto.getDefaultInstance();
17428                case 4:
17429                  return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto.getDefaultInstance();
17430                case 5:
17431                  return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto.getDefaultInstance();
17432                case 6:
17433                  return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto.getDefaultInstance();
17434                case 7:
17435                  return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto.getDefaultInstance();
17436                case 8:
17437                  return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto.getDefaultInstance();
17438                case 9:
17439                  return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto.getDefaultInstance();
17440                case 10:
17441                  return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto.getDefaultInstance();
17442                case 11:
17443                  return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto.getDefaultInstance();
17444                default:
17445                  throw new java.lang.AssertionError("Can't get here.");
17446              }
17447            }
17448    
17449          };
17450        }
17451    
17452        /**
17453         * <code>rpc isFormatted(.hadoop.hdfs.IsFormattedRequestProto) returns (.hadoop.hdfs.IsFormattedResponseProto);</code>
17454         */
17455        public abstract void isFormatted(
17456            com.google.protobuf.RpcController controller,
17457            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto request,
17458            com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto> done);
17459    
17460        /**
17461         * <code>rpc getJournalState(.hadoop.hdfs.GetJournalStateRequestProto) returns (.hadoop.hdfs.GetJournalStateResponseProto);</code>
17462         */
17463        public abstract void getJournalState(
17464            com.google.protobuf.RpcController controller,
17465            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto request,
17466            com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto> done);
17467    
17468        /**
17469         * <code>rpc newEpoch(.hadoop.hdfs.NewEpochRequestProto) returns (.hadoop.hdfs.NewEpochResponseProto);</code>
17470         */
17471        public abstract void newEpoch(
17472            com.google.protobuf.RpcController controller,
17473            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto request,
17474            com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto> done);
17475    
17476        /**
17477         * <code>rpc format(.hadoop.hdfs.FormatRequestProto) returns (.hadoop.hdfs.FormatResponseProto);</code>
17478         */
17479        public abstract void format(
17480            com.google.protobuf.RpcController controller,
17481            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto request,
17482            com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto> done);
17483    
17484        /**
17485         * <code>rpc journal(.hadoop.hdfs.JournalRequestProto) returns (.hadoop.hdfs.JournalResponseProto);</code>
17486         */
17487        public abstract void journal(
17488            com.google.protobuf.RpcController controller,
17489            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto request,
17490            com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto> done);
17491    
17492        /**
17493         * <code>rpc heartbeat(.hadoop.hdfs.HeartbeatRequestProto) returns (.hadoop.hdfs.HeartbeatResponseProto);</code>
17494         */
17495        public abstract void heartbeat(
17496            com.google.protobuf.RpcController controller,
17497            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto request,
17498            com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto> done);
17499    
17500        /**
17501         * <code>rpc startLogSegment(.hadoop.hdfs.StartLogSegmentRequestProto) returns (.hadoop.hdfs.StartLogSegmentResponseProto);</code>
17502         */
17503        public abstract void startLogSegment(
17504            com.google.protobuf.RpcController controller,
17505            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto request,
17506            com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto> done);
17507    
17508        /**
17509         * <code>rpc finalizeLogSegment(.hadoop.hdfs.FinalizeLogSegmentRequestProto) returns (.hadoop.hdfs.FinalizeLogSegmentResponseProto);</code>
17510         */
17511        public abstract void finalizeLogSegment(
17512            com.google.protobuf.RpcController controller,
17513            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto request,
17514            com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto> done);
17515    
17516        /**
17517         * <code>rpc purgeLogs(.hadoop.hdfs.PurgeLogsRequestProto) returns (.hadoop.hdfs.PurgeLogsResponseProto);</code>
17518         */
17519        public abstract void purgeLogs(
17520            com.google.protobuf.RpcController controller,
17521            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto request,
17522            com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto> done);
17523    
17524        /**
17525         * <code>rpc getEditLogManifest(.hadoop.hdfs.GetEditLogManifestRequestProto) returns (.hadoop.hdfs.GetEditLogManifestResponseProto);</code>
17526         */
17527        public abstract void getEditLogManifest(
17528            com.google.protobuf.RpcController controller,
17529            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto request,
17530            com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto> done);
17531    
17532        /**
17533         * <code>rpc prepareRecovery(.hadoop.hdfs.PrepareRecoveryRequestProto) returns (.hadoop.hdfs.PrepareRecoveryResponseProto);</code>
17534         */
17535        public abstract void prepareRecovery(
17536            com.google.protobuf.RpcController controller,
17537            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto request,
17538            com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto> done);
17539    
17540        /**
17541         * <code>rpc acceptRecovery(.hadoop.hdfs.AcceptRecoveryRequestProto) returns (.hadoop.hdfs.AcceptRecoveryResponseProto);</code>
17542         */
17543        public abstract void acceptRecovery(
17544            com.google.protobuf.RpcController controller,
17545            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto request,
17546            com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto> done);
17547    
17548        public static final
17549            com.google.protobuf.Descriptors.ServiceDescriptor
17550            getDescriptor() {
17551          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.getDescriptor().getServices().get(0);
17552        }
17553        public final com.google.protobuf.Descriptors.ServiceDescriptor
17554            getDescriptorForType() {
17555          return getDescriptor();
17556        }
17557    
17558        public final void callMethod(
17559            com.google.protobuf.Descriptors.MethodDescriptor method,
17560            com.google.protobuf.RpcController controller,
17561            com.google.protobuf.Message request,
17562            com.google.protobuf.RpcCallback<
17563              com.google.protobuf.Message> done) {
17564          if (method.getService() != getDescriptor()) {
17565            throw new java.lang.IllegalArgumentException(
17566              "Service.callMethod() given method descriptor for wrong " +
17567              "service type.");
17568          }
17569          switch(method.getIndex()) {
17570            case 0:
17571              this.isFormatted(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto)request,
17572                com.google.protobuf.RpcUtil.<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto>specializeCallback(
17573                  done));
17574              return;
17575            case 1:
17576              this.getJournalState(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto)request,
17577                com.google.protobuf.RpcUtil.<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto>specializeCallback(
17578                  done));
17579              return;
17580            case 2:
17581              this.newEpoch(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto)request,
17582                com.google.protobuf.RpcUtil.<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto>specializeCallback(
17583                  done));
17584              return;
17585            case 3:
17586              this.format(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto)request,
17587                com.google.protobuf.RpcUtil.<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto>specializeCallback(
17588                  done));
17589              return;
17590            case 4:
17591              this.journal(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto)request,
17592                com.google.protobuf.RpcUtil.<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto>specializeCallback(
17593                  done));
17594              return;
17595            case 5:
17596              this.heartbeat(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto)request,
17597                com.google.protobuf.RpcUtil.<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto>specializeCallback(
17598                  done));
17599              return;
17600            case 6:
17601              this.startLogSegment(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto)request,
17602                com.google.protobuf.RpcUtil.<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto>specializeCallback(
17603                  done));
17604              return;
17605            case 7:
17606              this.finalizeLogSegment(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto)request,
17607                com.google.protobuf.RpcUtil.<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto>specializeCallback(
17608                  done));
17609              return;
17610            case 8:
17611              this.purgeLogs(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto)request,
17612                com.google.protobuf.RpcUtil.<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto>specializeCallback(
17613                  done));
17614              return;
17615            case 9:
17616              this.getEditLogManifest(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto)request,
17617                com.google.protobuf.RpcUtil.<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto>specializeCallback(
17618                  done));
17619              return;
17620            case 10:
17621              this.prepareRecovery(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto)request,
17622                com.google.protobuf.RpcUtil.<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto>specializeCallback(
17623                  done));
17624              return;
17625            case 11:
17626              this.acceptRecovery(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto)request,
17627                com.google.protobuf.RpcUtil.<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto>specializeCallback(
17628                  done));
17629              return;
17630            default:
17631              throw new java.lang.AssertionError("Can't get here.");
17632          }
17633        }
17634    
17635        public final com.google.protobuf.Message
17636            getRequestPrototype(
17637            com.google.protobuf.Descriptors.MethodDescriptor method) {
17638          if (method.getService() != getDescriptor()) {
17639            throw new java.lang.IllegalArgumentException(
17640              "Service.getRequestPrototype() given method " +
17641              "descriptor for wrong service type.");
17642          }
17643          switch(method.getIndex()) {
17644            case 0:
17645              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto.getDefaultInstance();
17646            case 1:
17647              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto.getDefaultInstance();
17648            case 2:
17649              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto.getDefaultInstance();
17650            case 3:
17651              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto.getDefaultInstance();
17652            case 4:
17653              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto.getDefaultInstance();
17654            case 5:
17655              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto.getDefaultInstance();
17656            case 6:
17657              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto.getDefaultInstance();
17658            case 7:
17659              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto.getDefaultInstance();
17660            case 8:
17661              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto.getDefaultInstance();
17662            case 9:
17663              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto.getDefaultInstance();
17664            case 10:
17665              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto.getDefaultInstance();
17666            case 11:
17667              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto.getDefaultInstance();
17668            default:
17669              throw new java.lang.AssertionError("Can't get here.");
17670          }
17671        }
17672    
17673        public final com.google.protobuf.Message
17674            getResponsePrototype(
17675            com.google.protobuf.Descriptors.MethodDescriptor method) {
17676          if (method.getService() != getDescriptor()) {
17677            throw new java.lang.IllegalArgumentException(
17678              "Service.getResponsePrototype() given method " +
17679              "descriptor for wrong service type.");
17680          }
17681          switch(method.getIndex()) {
17682            case 0:
17683              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto.getDefaultInstance();
17684            case 1:
17685              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto.getDefaultInstance();
17686            case 2:
17687              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto.getDefaultInstance();
17688            case 3:
17689              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto.getDefaultInstance();
17690            case 4:
17691              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto.getDefaultInstance();
17692            case 5:
17693              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto.getDefaultInstance();
17694            case 6:
17695              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto.getDefaultInstance();
17696            case 7:
17697              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto.getDefaultInstance();
17698            case 8:
17699              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto.getDefaultInstance();
17700            case 9:
17701              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto.getDefaultInstance();
17702            case 10:
17703              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto.getDefaultInstance();
17704            case 11:
17705              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto.getDefaultInstance();
17706            default:
17707              throw new java.lang.AssertionError("Can't get here.");
17708          }
17709        }
17710    
17711        public static Stub newStub(
17712            com.google.protobuf.RpcChannel channel) {
17713          return new Stub(channel);
17714        }
17715    
17716        public static final class Stub extends org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.QJournalProtocolService implements Interface {
17717          private Stub(com.google.protobuf.RpcChannel channel) {
17718            this.channel = channel;
17719          }
17720    
17721          private final com.google.protobuf.RpcChannel channel;
17722    
17723          public com.google.protobuf.RpcChannel getChannel() {
17724            return channel;
17725          }
17726    
17727          public  void isFormatted(
17728              com.google.protobuf.RpcController controller,
17729              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto request,
17730              com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto> done) {
17731            channel.callMethod(
17732              getDescriptor().getMethods().get(0),
17733              controller,
17734              request,
17735              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto.getDefaultInstance(),
17736              com.google.protobuf.RpcUtil.generalizeCallback(
17737                done,
17738                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto.class,
17739                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto.getDefaultInstance()));
17740          }
17741    
17742          public  void getJournalState(
17743              com.google.protobuf.RpcController controller,
17744              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto request,
17745              com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto> done) {
17746            channel.callMethod(
17747              getDescriptor().getMethods().get(1),
17748              controller,
17749              request,
17750              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto.getDefaultInstance(),
17751              com.google.protobuf.RpcUtil.generalizeCallback(
17752                done,
17753                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto.class,
17754                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto.getDefaultInstance()));
17755          }
17756    
17757          public  void newEpoch(
17758              com.google.protobuf.RpcController controller,
17759              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto request,
17760              com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto> done) {
17761            channel.callMethod(
17762              getDescriptor().getMethods().get(2),
17763              controller,
17764              request,
17765              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto.getDefaultInstance(),
17766              com.google.protobuf.RpcUtil.generalizeCallback(
17767                done,
17768                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto.class,
17769                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto.getDefaultInstance()));
17770          }
17771    
17772          public  void format(
17773              com.google.protobuf.RpcController controller,
17774              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto request,
17775              com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto> done) {
17776            channel.callMethod(
17777              getDescriptor().getMethods().get(3),
17778              controller,
17779              request,
17780              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto.getDefaultInstance(),
17781              com.google.protobuf.RpcUtil.generalizeCallback(
17782                done,
17783                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto.class,
17784                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto.getDefaultInstance()));
17785          }
17786    
17787          public  void journal(
17788              com.google.protobuf.RpcController controller,
17789              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto request,
17790              com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto> done) {
17791            channel.callMethod(
17792              getDescriptor().getMethods().get(4),
17793              controller,
17794              request,
17795              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto.getDefaultInstance(),
17796              com.google.protobuf.RpcUtil.generalizeCallback(
17797                done,
17798                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto.class,
17799                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto.getDefaultInstance()));
17800          }
17801    
17802          public  void heartbeat(
17803              com.google.protobuf.RpcController controller,
17804              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto request,
17805              com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto> done) {
17806            channel.callMethod(
17807              getDescriptor().getMethods().get(5),
17808              controller,
17809              request,
17810              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto.getDefaultInstance(),
17811              com.google.protobuf.RpcUtil.generalizeCallback(
17812                done,
17813                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto.class,
17814                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto.getDefaultInstance()));
17815          }
17816    
17817          public  void startLogSegment(
17818              com.google.protobuf.RpcController controller,
17819              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto request,
17820              com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto> done) {
17821            channel.callMethod(
17822              getDescriptor().getMethods().get(6),
17823              controller,
17824              request,
17825              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto.getDefaultInstance(),
17826              com.google.protobuf.RpcUtil.generalizeCallback(
17827                done,
17828                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto.class,
17829                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto.getDefaultInstance()));
17830          }
17831    
17832          public  void finalizeLogSegment(
17833              com.google.protobuf.RpcController controller,
17834              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto request,
17835              com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto> done) {
17836            channel.callMethod(
17837              getDescriptor().getMethods().get(7),
17838              controller,
17839              request,
17840              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto.getDefaultInstance(),
17841              com.google.protobuf.RpcUtil.generalizeCallback(
17842                done,
17843                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto.class,
17844                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto.getDefaultInstance()));
17845          }
17846    
17847          public  void purgeLogs(
17848              com.google.protobuf.RpcController controller,
17849              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto request,
17850              com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto> done) {
17851            channel.callMethod(
17852              getDescriptor().getMethods().get(8),
17853              controller,
17854              request,
17855              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto.getDefaultInstance(),
17856              com.google.protobuf.RpcUtil.generalizeCallback(
17857                done,
17858                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto.class,
17859                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto.getDefaultInstance()));
17860          }
17861    
17862          public  void getEditLogManifest(
17863              com.google.protobuf.RpcController controller,
17864              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto request,
17865              com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto> done) {
17866            channel.callMethod(
17867              getDescriptor().getMethods().get(9),
17868              controller,
17869              request,
17870              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto.getDefaultInstance(),
17871              com.google.protobuf.RpcUtil.generalizeCallback(
17872                done,
17873                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto.class,
17874                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto.getDefaultInstance()));
17875          }
17876    
17877          public  void prepareRecovery(
17878              com.google.protobuf.RpcController controller,
17879              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto request,
17880              com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto> done) {
17881            channel.callMethod(
17882              getDescriptor().getMethods().get(10),
17883              controller,
17884              request,
17885              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto.getDefaultInstance(),
17886              com.google.protobuf.RpcUtil.generalizeCallback(
17887                done,
17888                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto.class,
17889                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto.getDefaultInstance()));
17890          }
17891    
17892          public  void acceptRecovery(
17893              com.google.protobuf.RpcController controller,
17894              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto request,
17895              com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto> done) {
17896            channel.callMethod(
17897              getDescriptor().getMethods().get(11),
17898              controller,
17899              request,
17900              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto.getDefaultInstance(),
17901              com.google.protobuf.RpcUtil.generalizeCallback(
17902                done,
17903                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto.class,
17904                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto.getDefaultInstance()));
17905          }
17906        }
17907    
17908        public static BlockingInterface newBlockingStub(
17909            com.google.protobuf.BlockingRpcChannel channel) {
17910          return new BlockingStub(channel);
17911        }
17912    
17913        public interface BlockingInterface {
17914          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto isFormatted(
17915              com.google.protobuf.RpcController controller,
17916              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto request)
17917              throws com.google.protobuf.ServiceException;
17918    
17919          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto getJournalState(
17920              com.google.protobuf.RpcController controller,
17921              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto request)
17922              throws com.google.protobuf.ServiceException;
17923    
17924          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto newEpoch(
17925              com.google.protobuf.RpcController controller,
17926              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto request)
17927              throws com.google.protobuf.ServiceException;
17928    
17929          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto format(
17930              com.google.protobuf.RpcController controller,
17931              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto request)
17932              throws com.google.protobuf.ServiceException;
17933    
17934          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto journal(
17935              com.google.protobuf.RpcController controller,
17936              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto request)
17937              throws com.google.protobuf.ServiceException;
17938    
17939          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto heartbeat(
17940              com.google.protobuf.RpcController controller,
17941              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto request)
17942              throws com.google.protobuf.ServiceException;
17943    
17944          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto startLogSegment(
17945              com.google.protobuf.RpcController controller,
17946              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto request)
17947              throws com.google.protobuf.ServiceException;
17948    
17949          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto finalizeLogSegment(
17950              com.google.protobuf.RpcController controller,
17951              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto request)
17952              throws com.google.protobuf.ServiceException;
17953    
17954          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto purgeLogs(
17955              com.google.protobuf.RpcController controller,
17956              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto request)
17957              throws com.google.protobuf.ServiceException;
17958    
17959          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto getEditLogManifest(
17960              com.google.protobuf.RpcController controller,
17961              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto request)
17962              throws com.google.protobuf.ServiceException;
17963    
17964          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto prepareRecovery(
17965              com.google.protobuf.RpcController controller,
17966              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto request)
17967              throws com.google.protobuf.ServiceException;
17968    
17969          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto acceptRecovery(
17970              com.google.protobuf.RpcController controller,
17971              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto request)
17972              throws com.google.protobuf.ServiceException;
17973        }
17974    
17975        private static final class BlockingStub implements BlockingInterface {
17976          private BlockingStub(com.google.protobuf.BlockingRpcChannel channel) {
17977            this.channel = channel;
17978          }
17979    
17980          private final com.google.protobuf.BlockingRpcChannel channel;
17981    
17982          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto isFormatted(
17983              com.google.protobuf.RpcController controller,
17984              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto request)
17985              throws com.google.protobuf.ServiceException {
17986            return (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto) channel.callBlockingMethod(
17987              getDescriptor().getMethods().get(0),
17988              controller,
17989              request,
17990              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto.getDefaultInstance());
17991          }
17992    
17993    
17994          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto getJournalState(
17995              com.google.protobuf.RpcController controller,
17996              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto request)
17997              throws com.google.protobuf.ServiceException {
17998            return (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto) channel.callBlockingMethod(
17999              getDescriptor().getMethods().get(1),
18000              controller,
18001              request,
18002              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto.getDefaultInstance());
18003          }
18004    
18005    
18006          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto newEpoch(
18007              com.google.protobuf.RpcController controller,
18008              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto request)
18009              throws com.google.protobuf.ServiceException {
18010            return (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto) channel.callBlockingMethod(
18011              getDescriptor().getMethods().get(2),
18012              controller,
18013              request,
18014              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto.getDefaultInstance());
18015          }
18016    
18017    
18018          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto format(
18019              com.google.protobuf.RpcController controller,
18020              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto request)
18021              throws com.google.protobuf.ServiceException {
18022            return (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto) channel.callBlockingMethod(
18023              getDescriptor().getMethods().get(3),
18024              controller,
18025              request,
18026              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto.getDefaultInstance());
18027          }
18028    
18029    
18030          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto journal(
18031              com.google.protobuf.RpcController controller,
18032              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto request)
18033              throws com.google.protobuf.ServiceException {
18034            return (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto) channel.callBlockingMethod(
18035              getDescriptor().getMethods().get(4),
18036              controller,
18037              request,
18038              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto.getDefaultInstance());
18039          }
18040    
18041    
18042          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto heartbeat(
18043              com.google.protobuf.RpcController controller,
18044              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto request)
18045              throws com.google.protobuf.ServiceException {
18046            return (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto) channel.callBlockingMethod(
18047              getDescriptor().getMethods().get(5),
18048              controller,
18049              request,
18050              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto.getDefaultInstance());
18051          }
18052    
18053    
18054          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto startLogSegment(
18055              com.google.protobuf.RpcController controller,
18056              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto request)
18057              throws com.google.protobuf.ServiceException {
18058            return (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto) channel.callBlockingMethod(
18059              getDescriptor().getMethods().get(6),
18060              controller,
18061              request,
18062              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto.getDefaultInstance());
18063          }
18064    
18065    
18066          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto finalizeLogSegment(
18067              com.google.protobuf.RpcController controller,
18068              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto request)
18069              throws com.google.protobuf.ServiceException {
18070            return (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto) channel.callBlockingMethod(
18071              getDescriptor().getMethods().get(7),
18072              controller,
18073              request,
18074              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto.getDefaultInstance());
18075          }
18076    
18077    
18078          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto purgeLogs(
18079              com.google.protobuf.RpcController controller,
18080              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto request)
18081              throws com.google.protobuf.ServiceException {
18082            return (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto) channel.callBlockingMethod(
18083              getDescriptor().getMethods().get(8),
18084              controller,
18085              request,
18086              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto.getDefaultInstance());
18087          }
18088    
18089    
18090          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto getEditLogManifest(
18091              com.google.protobuf.RpcController controller,
18092              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto request)
18093              throws com.google.protobuf.ServiceException {
18094            return (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto) channel.callBlockingMethod(
18095              getDescriptor().getMethods().get(9),
18096              controller,
18097              request,
18098              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto.getDefaultInstance());
18099          }
18100    
18101    
18102          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto prepareRecovery(
18103              com.google.protobuf.RpcController controller,
18104              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto request)
18105              throws com.google.protobuf.ServiceException {
18106            return (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto) channel.callBlockingMethod(
18107              getDescriptor().getMethods().get(10),
18108              controller,
18109              request,
18110              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto.getDefaultInstance());
18111          }
18112    
18113    
18114          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto acceptRecovery(
18115              com.google.protobuf.RpcController controller,
18116              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto request)
18117              throws com.google.protobuf.ServiceException {
18118            return (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto) channel.callBlockingMethod(
18119              getDescriptor().getMethods().get(11),
18120              controller,
18121              request,
18122              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto.getDefaultInstance());
18123          }
18124    
18125        }
18126    
18127        // @@protoc_insertion_point(class_scope:hadoop.hdfs.QJournalProtocolService)
18128      }
18129    
18130      private static com.google.protobuf.Descriptors.Descriptor
18131        internal_static_hadoop_hdfs_JournalIdProto_descriptor;
18132      private static
18133        com.google.protobuf.GeneratedMessage.FieldAccessorTable
18134          internal_static_hadoop_hdfs_JournalIdProto_fieldAccessorTable;
18135      private static com.google.protobuf.Descriptors.Descriptor
18136        internal_static_hadoop_hdfs_RequestInfoProto_descriptor;
18137      private static
18138        com.google.protobuf.GeneratedMessage.FieldAccessorTable
18139          internal_static_hadoop_hdfs_RequestInfoProto_fieldAccessorTable;
18140      private static com.google.protobuf.Descriptors.Descriptor
18141        internal_static_hadoop_hdfs_SegmentStateProto_descriptor;
18142      private static
18143        com.google.protobuf.GeneratedMessage.FieldAccessorTable
18144          internal_static_hadoop_hdfs_SegmentStateProto_fieldAccessorTable;
18145      private static com.google.protobuf.Descriptors.Descriptor
18146        internal_static_hadoop_hdfs_PersistedRecoveryPaxosData_descriptor;
18147      private static
18148        com.google.protobuf.GeneratedMessage.FieldAccessorTable
18149          internal_static_hadoop_hdfs_PersistedRecoveryPaxosData_fieldAccessorTable;
18150      private static com.google.protobuf.Descriptors.Descriptor
18151        internal_static_hadoop_hdfs_JournalRequestProto_descriptor;
18152      private static
18153        com.google.protobuf.GeneratedMessage.FieldAccessorTable
18154          internal_static_hadoop_hdfs_JournalRequestProto_fieldAccessorTable;
18155      private static com.google.protobuf.Descriptors.Descriptor
18156        internal_static_hadoop_hdfs_JournalResponseProto_descriptor;
18157      private static
18158        com.google.protobuf.GeneratedMessage.FieldAccessorTable
18159          internal_static_hadoop_hdfs_JournalResponseProto_fieldAccessorTable;
18160      private static com.google.protobuf.Descriptors.Descriptor
18161        internal_static_hadoop_hdfs_HeartbeatRequestProto_descriptor;
18162      private static
18163        com.google.protobuf.GeneratedMessage.FieldAccessorTable
18164          internal_static_hadoop_hdfs_HeartbeatRequestProto_fieldAccessorTable;
18165      private static com.google.protobuf.Descriptors.Descriptor
18166        internal_static_hadoop_hdfs_HeartbeatResponseProto_descriptor;
18167      private static
18168        com.google.protobuf.GeneratedMessage.FieldAccessorTable
18169          internal_static_hadoop_hdfs_HeartbeatResponseProto_fieldAccessorTable;
18170      private static com.google.protobuf.Descriptors.Descriptor
18171        internal_static_hadoop_hdfs_StartLogSegmentRequestProto_descriptor;
18172      private static
18173        com.google.protobuf.GeneratedMessage.FieldAccessorTable
18174          internal_static_hadoop_hdfs_StartLogSegmentRequestProto_fieldAccessorTable;
18175      private static com.google.protobuf.Descriptors.Descriptor
18176        internal_static_hadoop_hdfs_StartLogSegmentResponseProto_descriptor;
18177      private static
18178        com.google.protobuf.GeneratedMessage.FieldAccessorTable
18179          internal_static_hadoop_hdfs_StartLogSegmentResponseProto_fieldAccessorTable;
18180      private static com.google.protobuf.Descriptors.Descriptor
18181        internal_static_hadoop_hdfs_FinalizeLogSegmentRequestProto_descriptor;
18182      private static
18183        com.google.protobuf.GeneratedMessage.FieldAccessorTable
18184          internal_static_hadoop_hdfs_FinalizeLogSegmentRequestProto_fieldAccessorTable;
18185      private static com.google.protobuf.Descriptors.Descriptor
18186        internal_static_hadoop_hdfs_FinalizeLogSegmentResponseProto_descriptor;
18187      private static
18188        com.google.protobuf.GeneratedMessage.FieldAccessorTable
18189          internal_static_hadoop_hdfs_FinalizeLogSegmentResponseProto_fieldAccessorTable;
18190      private static com.google.protobuf.Descriptors.Descriptor
18191        internal_static_hadoop_hdfs_PurgeLogsRequestProto_descriptor;
18192      private static
18193        com.google.protobuf.GeneratedMessage.FieldAccessorTable
18194          internal_static_hadoop_hdfs_PurgeLogsRequestProto_fieldAccessorTable;
18195      private static com.google.protobuf.Descriptors.Descriptor
18196        internal_static_hadoop_hdfs_PurgeLogsResponseProto_descriptor;
18197      private static
18198        com.google.protobuf.GeneratedMessage.FieldAccessorTable
18199          internal_static_hadoop_hdfs_PurgeLogsResponseProto_fieldAccessorTable;
18200      private static com.google.protobuf.Descriptors.Descriptor
18201        internal_static_hadoop_hdfs_IsFormattedRequestProto_descriptor;
18202      private static
18203        com.google.protobuf.GeneratedMessage.FieldAccessorTable
18204          internal_static_hadoop_hdfs_IsFormattedRequestProto_fieldAccessorTable;
18205      private static com.google.protobuf.Descriptors.Descriptor
18206        internal_static_hadoop_hdfs_IsFormattedResponseProto_descriptor;
18207      private static
18208        com.google.protobuf.GeneratedMessage.FieldAccessorTable
18209          internal_static_hadoop_hdfs_IsFormattedResponseProto_fieldAccessorTable;
18210      private static com.google.protobuf.Descriptors.Descriptor
18211        internal_static_hadoop_hdfs_GetJournalStateRequestProto_descriptor;
18212      private static
18213        com.google.protobuf.GeneratedMessage.FieldAccessorTable
18214          internal_static_hadoop_hdfs_GetJournalStateRequestProto_fieldAccessorTable;
18215      private static com.google.protobuf.Descriptors.Descriptor
18216        internal_static_hadoop_hdfs_GetJournalStateResponseProto_descriptor;
18217      private static
18218        com.google.protobuf.GeneratedMessage.FieldAccessorTable
18219          internal_static_hadoop_hdfs_GetJournalStateResponseProto_fieldAccessorTable;
18220      private static com.google.protobuf.Descriptors.Descriptor
18221        internal_static_hadoop_hdfs_FormatRequestProto_descriptor;
18222      private static
18223        com.google.protobuf.GeneratedMessage.FieldAccessorTable
18224          internal_static_hadoop_hdfs_FormatRequestProto_fieldAccessorTable;
18225      private static com.google.protobuf.Descriptors.Descriptor
18226        internal_static_hadoop_hdfs_FormatResponseProto_descriptor;
18227      private static
18228        com.google.protobuf.GeneratedMessage.FieldAccessorTable
18229          internal_static_hadoop_hdfs_FormatResponseProto_fieldAccessorTable;
18230      private static com.google.protobuf.Descriptors.Descriptor
18231        internal_static_hadoop_hdfs_NewEpochRequestProto_descriptor;
18232      private static
18233        com.google.protobuf.GeneratedMessage.FieldAccessorTable
18234          internal_static_hadoop_hdfs_NewEpochRequestProto_fieldAccessorTable;
18235      private static com.google.protobuf.Descriptors.Descriptor
18236        internal_static_hadoop_hdfs_NewEpochResponseProto_descriptor;
18237      private static
18238        com.google.protobuf.GeneratedMessage.FieldAccessorTable
18239          internal_static_hadoop_hdfs_NewEpochResponseProto_fieldAccessorTable;
18240      private static com.google.protobuf.Descriptors.Descriptor
18241        internal_static_hadoop_hdfs_GetEditLogManifestRequestProto_descriptor;
18242      private static
18243        com.google.protobuf.GeneratedMessage.FieldAccessorTable
18244          internal_static_hadoop_hdfs_GetEditLogManifestRequestProto_fieldAccessorTable;
18245      private static com.google.protobuf.Descriptors.Descriptor
18246        internal_static_hadoop_hdfs_GetEditLogManifestResponseProto_descriptor;
18247      private static
18248        com.google.protobuf.GeneratedMessage.FieldAccessorTable
18249          internal_static_hadoop_hdfs_GetEditLogManifestResponseProto_fieldAccessorTable;
18250      private static com.google.protobuf.Descriptors.Descriptor
18251        internal_static_hadoop_hdfs_PrepareRecoveryRequestProto_descriptor;
18252      private static
18253        com.google.protobuf.GeneratedMessage.FieldAccessorTable
18254          internal_static_hadoop_hdfs_PrepareRecoveryRequestProto_fieldAccessorTable;
18255      private static com.google.protobuf.Descriptors.Descriptor
18256        internal_static_hadoop_hdfs_PrepareRecoveryResponseProto_descriptor;
18257      private static
18258        com.google.protobuf.GeneratedMessage.FieldAccessorTable
18259          internal_static_hadoop_hdfs_PrepareRecoveryResponseProto_fieldAccessorTable;
18260      private static com.google.protobuf.Descriptors.Descriptor
18261        internal_static_hadoop_hdfs_AcceptRecoveryRequestProto_descriptor;
18262      private static
18263        com.google.protobuf.GeneratedMessage.FieldAccessorTable
18264          internal_static_hadoop_hdfs_AcceptRecoveryRequestProto_fieldAccessorTable;
18265      private static com.google.protobuf.Descriptors.Descriptor
18266        internal_static_hadoop_hdfs_AcceptRecoveryResponseProto_descriptor;
18267      private static
18268        com.google.protobuf.GeneratedMessage.FieldAccessorTable
18269          internal_static_hadoop_hdfs_AcceptRecoveryResponseProto_fieldAccessorTable;
18270    
18271      public static com.google.protobuf.Descriptors.FileDescriptor
18272          getDescriptor() {
18273        return descriptor;
18274      }
18275      private static com.google.protobuf.Descriptors.FileDescriptor
18276          descriptor;
18277      static {
18278        java.lang.String[] descriptorData = {
18279          "\n\026QJournalProtocol.proto\022\013hadoop.hdfs\032\nh" +
18280          "dfs.proto\"$\n\016JournalIdProto\022\022\n\nidentifie" +
18281          "r\030\001 \002(\t\"\201\001\n\020RequestInfoProto\022.\n\tjournalI" +
18282          "d\030\001 \002(\0132\033.hadoop.hdfs.JournalIdProto\022\r\n\005" +
18283          "epoch\030\002 \002(\004\022\027\n\017ipcSerialNumber\030\003 \002(\004\022\025\n\r" +
18284          "committedTxId\030\004 \001(\004\"M\n\021SegmentStateProto" +
18285          "\022\021\n\tstartTxId\030\001 \002(\004\022\017\n\007endTxId\030\002 \002(\004\022\024\n\014" +
18286          "isInProgress\030\003 \002(\010\"k\n\032PersistedRecoveryP" +
18287          "axosData\0224\n\014segmentState\030\001 \002(\0132\036.hadoop." +
18288          "hdfs.SegmentStateProto\022\027\n\017acceptedInEpoc",
18289          "h\030\002 \002(\004\"\221\001\n\023JournalRequestProto\022.\n\007reqIn" +
18290          "fo\030\001 \002(\0132\035.hadoop.hdfs.RequestInfoProto\022" +
18291          "\022\n\nfirstTxnId\030\002 \002(\004\022\017\n\007numTxns\030\003 \002(\r\022\017\n\007" +
18292          "records\030\004 \002(\014\022\024\n\014segmentTxnId\030\005 \002(\004\"\026\n\024J" +
18293          "ournalResponseProto\"G\n\025HeartbeatRequestP" +
18294          "roto\022.\n\007reqInfo\030\001 \002(\0132\035.hadoop.hdfs.Requ" +
18295          "estInfoProto\"\030\n\026HeartbeatResponseProto\"[" +
18296          "\n\033StartLogSegmentRequestProto\022.\n\007reqInfo" +
18297          "\030\001 \002(\0132\035.hadoop.hdfs.RequestInfoProto\022\014\n" +
18298          "\004txid\030\002 \002(\004\"\036\n\034StartLogSegmentResponsePr",
18299          "oto\"t\n\036FinalizeLogSegmentRequestProto\022.\n" +
18300          "\007reqInfo\030\001 \002(\0132\035.hadoop.hdfs.RequestInfo" +
18301          "Proto\022\021\n\tstartTxId\030\002 \002(\004\022\017\n\007endTxId\030\003 \002(" +
18302          "\004\"!\n\037FinalizeLogSegmentResponseProto\"^\n\025" +
18303          "PurgeLogsRequestProto\022.\n\007reqInfo\030\001 \002(\0132\035" +
18304          ".hadoop.hdfs.RequestInfoProto\022\025\n\rminTxId" +
18305          "ToKeep\030\002 \002(\004\"\030\n\026PurgeLogsResponseProto\"C" +
18306          "\n\027IsFormattedRequestProto\022(\n\003jid\030\001 \002(\0132\033" +
18307          ".hadoop.hdfs.JournalIdProto\"/\n\030IsFormatt" +
18308          "edResponseProto\022\023\n\013isFormatted\030\001 \002(\010\"G\n\033",
18309          "GetJournalStateRequestProto\022(\n\003jid\030\001 \002(\013" +
18310          "2\033.hadoop.hdfs.JournalIdProto\"K\n\034GetJour" +
18311          "nalStateResponseProto\022\031\n\021lastPromisedEpo" +
18312          "ch\030\001 \002(\004\022\020\n\010httpPort\030\002 \002(\r\"o\n\022FormatRequ" +
18313          "estProto\022(\n\003jid\030\001 \002(\0132\033.hadoop.hdfs.Jour" +
18314          "nalIdProto\022/\n\006nsInfo\030\002 \002(\0132\037.hadoop.hdfs" +
18315          ".NamespaceInfoProto\"\025\n\023FormatResponsePro" +
18316          "to\"\200\001\n\024NewEpochRequestProto\022(\n\003jid\030\001 \002(\013" +
18317          "2\033.hadoop.hdfs.JournalIdProto\022/\n\006nsInfo\030" +
18318          "\002 \002(\0132\037.hadoop.hdfs.NamespaceInfoProto\022\r",
18319          "\n\005epoch\030\003 \002(\004\"0\n\025NewEpochResponseProto\022\027" +
18320          "\n\017lastSegmentTxId\030\001 \001(\004\"w\n\036GetEditLogMan" +
18321          "ifestRequestProto\022(\n\003jid\030\001 \002(\0132\033.hadoop." +
18322          "hdfs.JournalIdProto\022\021\n\tsinceTxId\030\002 \002(\004\022\030" +
18323          "\n\nforReading\030\003 \001(\010:\004true\"n\n\037GetEditLogMa" +
18324          "nifestResponseProto\0229\n\010manifest\030\001 \002(\0132\'." +
18325          "hadoop.hdfs.RemoteEditLogManifestProto\022\020" +
18326          "\n\010httpPort\030\002 \002(\r\"b\n\033PrepareRecoveryReque" +
18327          "stProto\022.\n\007reqInfo\030\001 \002(\0132\035.hadoop.hdfs.R" +
18328          "equestInfoProto\022\023\n\013segmentTxId\030\002 \002(\004\"\241\001\n",
18329          "\034PrepareRecoveryResponseProto\0224\n\014segment" +
18330          "State\030\001 \001(\0132\036.hadoop.hdfs.SegmentStatePr" +
18331          "oto\022\027\n\017acceptedInEpoch\030\002 \001(\004\022\027\n\017lastWrit" +
18332          "erEpoch\030\003 \002(\004\022\031\n\021lastCommittedTxId\030\004 \001(\004" +
18333          "\"\224\001\n\032AcceptRecoveryRequestProto\022.\n\007reqIn" +
18334          "fo\030\001 \002(\0132\035.hadoop.hdfs.RequestInfoProto\022" +
18335          "5\n\rstateToAccept\030\002 \002(\0132\036.hadoop.hdfs.Seg" +
18336          "mentStateProto\022\017\n\007fromURL\030\003 \002(\t\"\035\n\033Accep" +
18337          "tRecoveryResponseProto2\220\t\n\027QJournalProto" +
18338          "colService\022Z\n\013isFormatted\022$.hadoop.hdfs.",
18339          "IsFormattedRequestProto\032%.hadoop.hdfs.Is" +
18340          "FormattedResponseProto\022f\n\017getJournalStat" +
18341          "e\022(.hadoop.hdfs.GetJournalStateRequestPr" +
18342          "oto\032).hadoop.hdfs.GetJournalStateRespons" +
18343          "eProto\022Q\n\010newEpoch\022!.hadoop.hdfs.NewEpoc" +
18344          "hRequestProto\032\".hadoop.hdfs.NewEpochResp" +
18345          "onseProto\022K\n\006format\022\037.hadoop.hdfs.Format" +
18346          "RequestProto\032 .hadoop.hdfs.FormatRespons" +
18347          "eProto\022N\n\007journal\022 .hadoop.hdfs.JournalR" +
18348          "equestProto\032!.hadoop.hdfs.JournalRespons",
18349          "eProto\022T\n\theartbeat\022\".hadoop.hdfs.Heartb" +
18350          "eatRequestProto\032#.hadoop.hdfs.HeartbeatR" +
18351          "esponseProto\022f\n\017startLogSegment\022(.hadoop" +
18352          ".hdfs.StartLogSegmentRequestProto\032).hado" +
18353          "op.hdfs.StartLogSegmentResponseProto\022o\n\022" +
18354          "finalizeLogSegment\022+.hadoop.hdfs.Finaliz" +
18355          "eLogSegmentRequestProto\032,.hadoop.hdfs.Fi" +
18356          "nalizeLogSegmentResponseProto\022T\n\tpurgeLo" +
18357          "gs\022\".hadoop.hdfs.PurgeLogsRequestProto\032#" +
18358          ".hadoop.hdfs.PurgeLogsResponseProto\022o\n\022g",
18359          "etEditLogManifest\022+.hadoop.hdfs.GetEditL" +
18360          "ogManifestRequestProto\032,.hadoop.hdfs.Get" +
18361          "EditLogManifestResponseProto\022f\n\017prepareR" +
18362          "ecovery\022(.hadoop.hdfs.PrepareRecoveryReq" +
18363          "uestProto\032).hadoop.hdfs.PrepareRecoveryR" +
18364          "esponseProto\022c\n\016acceptRecovery\022\'.hadoop." +
18365          "hdfs.AcceptRecoveryRequestProto\032(.hadoop" +
18366          ".hdfs.AcceptRecoveryResponseProtoBH\n(org" +
18367          ".apache.hadoop.hdfs.qjournal.protocolB\026Q" +
18368          "JournalProtocolProtos\210\001\001\240\001\001"
18369        };
18370        com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
18371          new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
18372            public com.google.protobuf.ExtensionRegistry assignDescriptors(
18373                com.google.protobuf.Descriptors.FileDescriptor root) {
18374              descriptor = root;
18375              internal_static_hadoop_hdfs_JournalIdProto_descriptor =
18376                getDescriptor().getMessageTypes().get(0);
18377              internal_static_hadoop_hdfs_JournalIdProto_fieldAccessorTable = new
18378                com.google.protobuf.GeneratedMessage.FieldAccessorTable(
18379                  internal_static_hadoop_hdfs_JournalIdProto_descriptor,
18380                  new java.lang.String[] { "Identifier", });
18381              internal_static_hadoop_hdfs_RequestInfoProto_descriptor =
18382                getDescriptor().getMessageTypes().get(1);
18383              internal_static_hadoop_hdfs_RequestInfoProto_fieldAccessorTable = new
18384                com.google.protobuf.GeneratedMessage.FieldAccessorTable(
18385                  internal_static_hadoop_hdfs_RequestInfoProto_descriptor,
18386                  new java.lang.String[] { "JournalId", "Epoch", "IpcSerialNumber", "CommittedTxId", });
18387              internal_static_hadoop_hdfs_SegmentStateProto_descriptor =
18388                getDescriptor().getMessageTypes().get(2);
18389              internal_static_hadoop_hdfs_SegmentStateProto_fieldAccessorTable = new
18390                com.google.protobuf.GeneratedMessage.FieldAccessorTable(
18391                  internal_static_hadoop_hdfs_SegmentStateProto_descriptor,
18392                  new java.lang.String[] { "StartTxId", "EndTxId", "IsInProgress", });
18393              internal_static_hadoop_hdfs_PersistedRecoveryPaxosData_descriptor =
18394                getDescriptor().getMessageTypes().get(3);
18395              internal_static_hadoop_hdfs_PersistedRecoveryPaxosData_fieldAccessorTable = new
18396                com.google.protobuf.GeneratedMessage.FieldAccessorTable(
18397                  internal_static_hadoop_hdfs_PersistedRecoveryPaxosData_descriptor,
18398                  new java.lang.String[] { "SegmentState", "AcceptedInEpoch", });
18399              internal_static_hadoop_hdfs_JournalRequestProto_descriptor =
18400                getDescriptor().getMessageTypes().get(4);
18401              internal_static_hadoop_hdfs_JournalRequestProto_fieldAccessorTable = new
18402                com.google.protobuf.GeneratedMessage.FieldAccessorTable(
18403                  internal_static_hadoop_hdfs_JournalRequestProto_descriptor,
18404                  new java.lang.String[] { "ReqInfo", "FirstTxnId", "NumTxns", "Records", "SegmentTxnId", });
18405              internal_static_hadoop_hdfs_JournalResponseProto_descriptor =
18406                getDescriptor().getMessageTypes().get(5);
18407              internal_static_hadoop_hdfs_JournalResponseProto_fieldAccessorTable = new
18408                com.google.protobuf.GeneratedMessage.FieldAccessorTable(
18409                  internal_static_hadoop_hdfs_JournalResponseProto_descriptor,
18410                  new java.lang.String[] { });
18411              internal_static_hadoop_hdfs_HeartbeatRequestProto_descriptor =
18412                getDescriptor().getMessageTypes().get(6);
18413              internal_static_hadoop_hdfs_HeartbeatRequestProto_fieldAccessorTable = new
18414                com.google.protobuf.GeneratedMessage.FieldAccessorTable(
18415                  internal_static_hadoop_hdfs_HeartbeatRequestProto_descriptor,
18416                  new java.lang.String[] { "ReqInfo", });
18417              internal_static_hadoop_hdfs_HeartbeatResponseProto_descriptor =
18418                getDescriptor().getMessageTypes().get(7);
18419              internal_static_hadoop_hdfs_HeartbeatResponseProto_fieldAccessorTable = new
18420                com.google.protobuf.GeneratedMessage.FieldAccessorTable(
18421                  internal_static_hadoop_hdfs_HeartbeatResponseProto_descriptor,
18422                  new java.lang.String[] { });
18423              internal_static_hadoop_hdfs_StartLogSegmentRequestProto_descriptor =
18424                getDescriptor().getMessageTypes().get(8);
18425              internal_static_hadoop_hdfs_StartLogSegmentRequestProto_fieldAccessorTable = new
18426                com.google.protobuf.GeneratedMessage.FieldAccessorTable(
18427                  internal_static_hadoop_hdfs_StartLogSegmentRequestProto_descriptor,
18428                  new java.lang.String[] { "ReqInfo", "Txid", });
18429              internal_static_hadoop_hdfs_StartLogSegmentResponseProto_descriptor =
18430                getDescriptor().getMessageTypes().get(9);
18431              internal_static_hadoop_hdfs_StartLogSegmentResponseProto_fieldAccessorTable = new
18432                com.google.protobuf.GeneratedMessage.FieldAccessorTable(
18433                  internal_static_hadoop_hdfs_StartLogSegmentResponseProto_descriptor,
18434                  new java.lang.String[] { });
18435              internal_static_hadoop_hdfs_FinalizeLogSegmentRequestProto_descriptor =
18436                getDescriptor().getMessageTypes().get(10);
18437              internal_static_hadoop_hdfs_FinalizeLogSegmentRequestProto_fieldAccessorTable = new
18438                com.google.protobuf.GeneratedMessage.FieldAccessorTable(
18439                  internal_static_hadoop_hdfs_FinalizeLogSegmentRequestProto_descriptor,
18440                  new java.lang.String[] { "ReqInfo", "StartTxId", "EndTxId", });
18441              internal_static_hadoop_hdfs_FinalizeLogSegmentResponseProto_descriptor =
18442                getDescriptor().getMessageTypes().get(11);
18443              internal_static_hadoop_hdfs_FinalizeLogSegmentResponseProto_fieldAccessorTable = new
18444                com.google.protobuf.GeneratedMessage.FieldAccessorTable(
18445                  internal_static_hadoop_hdfs_FinalizeLogSegmentResponseProto_descriptor,
18446                  new java.lang.String[] { });
18447              internal_static_hadoop_hdfs_PurgeLogsRequestProto_descriptor =
18448                getDescriptor().getMessageTypes().get(12);
18449              internal_static_hadoop_hdfs_PurgeLogsRequestProto_fieldAccessorTable = new
18450                com.google.protobuf.GeneratedMessage.FieldAccessorTable(
18451                  internal_static_hadoop_hdfs_PurgeLogsRequestProto_descriptor,
18452                  new java.lang.String[] { "ReqInfo", "MinTxIdToKeep", });
18453              internal_static_hadoop_hdfs_PurgeLogsResponseProto_descriptor =
18454                getDescriptor().getMessageTypes().get(13);
18455              internal_static_hadoop_hdfs_PurgeLogsResponseProto_fieldAccessorTable = new
18456                com.google.protobuf.GeneratedMessage.FieldAccessorTable(
18457                  internal_static_hadoop_hdfs_PurgeLogsResponseProto_descriptor,
18458                  new java.lang.String[] { });
18459              internal_static_hadoop_hdfs_IsFormattedRequestProto_descriptor =
18460                getDescriptor().getMessageTypes().get(14);
18461              internal_static_hadoop_hdfs_IsFormattedRequestProto_fieldAccessorTable = new
18462                com.google.protobuf.GeneratedMessage.FieldAccessorTable(
18463                  internal_static_hadoop_hdfs_IsFormattedRequestProto_descriptor,
18464                  new java.lang.String[] { "Jid", });
18465              internal_static_hadoop_hdfs_IsFormattedResponseProto_descriptor =
18466                getDescriptor().getMessageTypes().get(15);
18467              internal_static_hadoop_hdfs_IsFormattedResponseProto_fieldAccessorTable = new
18468                com.google.protobuf.GeneratedMessage.FieldAccessorTable(
18469                  internal_static_hadoop_hdfs_IsFormattedResponseProto_descriptor,
18470                  new java.lang.String[] { "IsFormatted", });
18471              internal_static_hadoop_hdfs_GetJournalStateRequestProto_descriptor =
18472                getDescriptor().getMessageTypes().get(16);
18473              internal_static_hadoop_hdfs_GetJournalStateRequestProto_fieldAccessorTable = new
18474                com.google.protobuf.GeneratedMessage.FieldAccessorTable(
18475                  internal_static_hadoop_hdfs_GetJournalStateRequestProto_descriptor,
18476                  new java.lang.String[] { "Jid", });
18477              internal_static_hadoop_hdfs_GetJournalStateResponseProto_descriptor =
18478                getDescriptor().getMessageTypes().get(17);
18479              internal_static_hadoop_hdfs_GetJournalStateResponseProto_fieldAccessorTable = new
18480                com.google.protobuf.GeneratedMessage.FieldAccessorTable(
18481                  internal_static_hadoop_hdfs_GetJournalStateResponseProto_descriptor,
18482                  new java.lang.String[] { "LastPromisedEpoch", "HttpPort", });
18483              internal_static_hadoop_hdfs_FormatRequestProto_descriptor =
18484                getDescriptor().getMessageTypes().get(18);
18485              internal_static_hadoop_hdfs_FormatRequestProto_fieldAccessorTable = new
18486                com.google.protobuf.GeneratedMessage.FieldAccessorTable(
18487                  internal_static_hadoop_hdfs_FormatRequestProto_descriptor,
18488                  new java.lang.String[] { "Jid", "NsInfo", });
18489              internal_static_hadoop_hdfs_FormatResponseProto_descriptor =
18490                getDescriptor().getMessageTypes().get(19);
18491              internal_static_hadoop_hdfs_FormatResponseProto_fieldAccessorTable = new
18492                com.google.protobuf.GeneratedMessage.FieldAccessorTable(
18493                  internal_static_hadoop_hdfs_FormatResponseProto_descriptor,
18494                  new java.lang.String[] { });
18495              internal_static_hadoop_hdfs_NewEpochRequestProto_descriptor =
18496                getDescriptor().getMessageTypes().get(20);
18497              internal_static_hadoop_hdfs_NewEpochRequestProto_fieldAccessorTable = new
18498                com.google.protobuf.GeneratedMessage.FieldAccessorTable(
18499                  internal_static_hadoop_hdfs_NewEpochRequestProto_descriptor,
18500                  new java.lang.String[] { "Jid", "NsInfo", "Epoch", });
18501              internal_static_hadoop_hdfs_NewEpochResponseProto_descriptor =
18502                getDescriptor().getMessageTypes().get(21);
18503              internal_static_hadoop_hdfs_NewEpochResponseProto_fieldAccessorTable = new
18504                com.google.protobuf.GeneratedMessage.FieldAccessorTable(
18505                  internal_static_hadoop_hdfs_NewEpochResponseProto_descriptor,
18506                  new java.lang.String[] { "LastSegmentTxId", });
18507              internal_static_hadoop_hdfs_GetEditLogManifestRequestProto_descriptor =
18508                getDescriptor().getMessageTypes().get(22);
18509              internal_static_hadoop_hdfs_GetEditLogManifestRequestProto_fieldAccessorTable = new
18510                com.google.protobuf.GeneratedMessage.FieldAccessorTable(
18511                  internal_static_hadoop_hdfs_GetEditLogManifestRequestProto_descriptor,
18512                  new java.lang.String[] { "Jid", "SinceTxId", "ForReading", });
18513              internal_static_hadoop_hdfs_GetEditLogManifestResponseProto_descriptor =
18514                getDescriptor().getMessageTypes().get(23);
18515              internal_static_hadoop_hdfs_GetEditLogManifestResponseProto_fieldAccessorTable = new
18516                com.google.protobuf.GeneratedMessage.FieldAccessorTable(
18517                  internal_static_hadoop_hdfs_GetEditLogManifestResponseProto_descriptor,
18518                  new java.lang.String[] { "Manifest", "HttpPort", });
18519              internal_static_hadoop_hdfs_PrepareRecoveryRequestProto_descriptor =
18520                getDescriptor().getMessageTypes().get(24);
18521              internal_static_hadoop_hdfs_PrepareRecoveryRequestProto_fieldAccessorTable = new
18522                com.google.protobuf.GeneratedMessage.FieldAccessorTable(
18523                  internal_static_hadoop_hdfs_PrepareRecoveryRequestProto_descriptor,
18524                  new java.lang.String[] { "ReqInfo", "SegmentTxId", });
18525              internal_static_hadoop_hdfs_PrepareRecoveryResponseProto_descriptor =
18526                getDescriptor().getMessageTypes().get(25);
18527              internal_static_hadoop_hdfs_PrepareRecoveryResponseProto_fieldAccessorTable = new
18528                com.google.protobuf.GeneratedMessage.FieldAccessorTable(
18529                  internal_static_hadoop_hdfs_PrepareRecoveryResponseProto_descriptor,
18530                  new java.lang.String[] { "SegmentState", "AcceptedInEpoch", "LastWriterEpoch", "LastCommittedTxId", });
18531              internal_static_hadoop_hdfs_AcceptRecoveryRequestProto_descriptor =
18532                getDescriptor().getMessageTypes().get(26);
18533              internal_static_hadoop_hdfs_AcceptRecoveryRequestProto_fieldAccessorTable = new
18534                com.google.protobuf.GeneratedMessage.FieldAccessorTable(
18535                  internal_static_hadoop_hdfs_AcceptRecoveryRequestProto_descriptor,
18536                  new java.lang.String[] { "ReqInfo", "StateToAccept", "FromURL", });
18537              internal_static_hadoop_hdfs_AcceptRecoveryResponseProto_descriptor =
18538                getDescriptor().getMessageTypes().get(27);
18539              internal_static_hadoop_hdfs_AcceptRecoveryResponseProto_fieldAccessorTable = new
18540                com.google.protobuf.GeneratedMessage.FieldAccessorTable(
18541                  internal_static_hadoop_hdfs_AcceptRecoveryResponseProto_descriptor,
18542                  new java.lang.String[] { });
18543              return null;
18544            }
18545          };
18546        com.google.protobuf.Descriptors.FileDescriptor
18547          .internalBuildGeneratedFileFrom(descriptorData,
18548            new com.google.protobuf.Descriptors.FileDescriptor[] {
18549              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.getDescriptor(),
18550            }, assigner);
18551      }
18552    
18553      // @@protoc_insertion_point(outer_class_scope)
18554    }