// Generated by the protocol buffer compiler.  DO NOT EDIT!
// source: QJournalProtocol.proto

package org.apache.hadoop.hdfs.qjournal.protocol;

public final class QJournalProtocolProtos {
  private QJournalProtocolProtos() {}
  public static void registerAllExtensions(
      com.google.protobuf.ExtensionRegistry registry) {
  }
  public interface JournalIdProtoOrBuilder
      extends com.google.protobuf.MessageOrBuilder {

    // required string identifier = 1;
    /**
     * <code>required string identifier = 1;</code>
     */
    boolean hasIdentifier();
    /**
     * <code>required string identifier = 1;</code>
     */
    java.lang.String getIdentifier();
    /**
     * <code>required string identifier = 1;</code>
     */
    com.google.protobuf.ByteString
        getIdentifierBytes();
  }
  /**
   * Protobuf type {@code hadoop.hdfs.JournalIdProto}
   */
  public static final class JournalIdProto extends
      com.google.protobuf.GeneratedMessage
      implements JournalIdProtoOrBuilder {
    // Use JournalIdProto.newBuilder() to construct.
    private JournalIdProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
      super(builder);
      this.unknownFields = builder.getUnknownFields();
    }
    private JournalIdProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }

    private static final JournalIdProto defaultInstance;
    public static JournalIdProto getDefaultInstance() {
      return defaultInstance;
    }

    public JournalIdProto getDefaultInstanceForType() {
      return defaultInstance;
    }

    private final com.google.protobuf.UnknownFieldSet unknownFields;
    @java.lang.Override
    public final com.google.protobuf.UnknownFieldSet
        getUnknownFields() {
      return this.unknownFields;
    }
    private JournalIdProto(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      initFields();
      int mutable_bitField0_ = 0;
      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
          com.google.protobuf.UnknownFieldSet.newBuilder();
      try {
        boolean done = false;
        while (!done) {
          int tag = input.readTag();
          switch (tag) {
            case 0:
              done = true;
              break;
            default: {
              if (!parseUnknownField(input, unknownFields,
                                     extensionRegistry, tag)) {
                done = true;
              }
              break;
            }
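            // Editorial note: protobuf tags encode (field_number << 3) | wire_type,
            // so tag 10 below is field 1 (identifier) with wire type 2
            // (length-delimited), and tag 0 above marks end of input.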
            case 10: {
              bitField0_ |= 0x00000001;
              identifier_ = input.readBytes();
              break;
            }
          }
        }
      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
        throw e.setUnfinishedMessage(this);
      } catch (java.io.IOException e) {
        throw new com.google.protobuf.InvalidProtocolBufferException(
            e.getMessage()).setUnfinishedMessage(this);
      } finally {
        this.unknownFields = unknownFields.build();
        makeExtensionsImmutable();
      }
    }
    public static final com.google.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_JournalIdProto_descriptor;
    }

    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_JournalIdProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder.class);
    }

    public static com.google.protobuf.Parser<JournalIdProto> PARSER =
        new com.google.protobuf.AbstractParser<JournalIdProto>() {
      public JournalIdProto parsePartialFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws com.google.protobuf.InvalidProtocolBufferException {
        return new JournalIdProto(input, extensionRegistry);
      }
    };

    @java.lang.Override
    public com.google.protobuf.Parser<JournalIdProto> getParserForType() {
      return PARSER;
    }

    private int bitField0_;
    // required string identifier = 1;
    public static final int IDENTIFIER_FIELD_NUMBER = 1;
    private java.lang.Object identifier_;
    /**
     * <code>required string identifier = 1;</code>
     */
    public boolean hasIdentifier() {
      return ((bitField0_ & 0x00000001) == 0x00000001);
    }
    /**
     * <code>required string identifier = 1;</code>
     */
    public java.lang.String getIdentifier() {
      java.lang.Object ref = identifier_;
      if (ref instanceof java.lang.String) {
        return (java.lang.String) ref;
      } else {
        com.google.protobuf.ByteString bs =
            (com.google.protobuf.ByteString) ref;
        java.lang.String s = bs.toStringUtf8();
        if (bs.isValidUtf8()) {
          identifier_ = s;
        }
        return s;
      }
    }
    /**
     * <code>required string identifier = 1;</code>
     */
    public com.google.protobuf.ByteString
        getIdentifierBytes() {
      java.lang.Object ref = identifier_;
      if (ref instanceof java.lang.String) {
        com.google.protobuf.ByteString b =
            com.google.protobuf.ByteString.copyFromUtf8(
                (java.lang.String) ref);
        identifier_ = b;
        return b;
      } else {
        return (com.google.protobuf.ByteString) ref;
      }
    }

    private void initFields() {
      identifier_ = "";
    }
    private byte memoizedIsInitialized = -1;
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized != -1) return isInitialized == 1;

      if (!hasIdentifier()) {
        memoizedIsInitialized = 0;
        return false;
      }
      memoizedIsInitialized = 1;
      return true;
    }

    public void writeTo(com.google.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      getSerializedSize();
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        output.writeBytes(1, getIdentifierBytes());
      }
      getUnknownFields().writeTo(output);
    }

    private int memoizedSerializedSize = -1;
    public int getSerializedSize() {
      int size = memoizedSerializedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        size += com.google.protobuf.CodedOutputStream
          .computeBytesSize(1, getIdentifierBytes());
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSerializedSize = size;
      return size;
    }

    private static final long serialVersionUID = 0L;
    @java.lang.Override
    protected java.lang.Object writeReplace()
        throws java.io.ObjectStreamException {
      return super.writeReplace();
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto) obj;

      boolean result = true;
      result = result && (hasIdentifier() == other.hasIdentifier());
      if (hasIdentifier()) {
        result = result && getIdentifier()
            .equals(other.getIdentifier());
      }
      result = result &&
          getUnknownFields().equals(other.getUnknownFields());
      return result;
    }

    private int memoizedHashCode = 0;
    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptorForType().hashCode();
      if (hasIdentifier()) {
        hash = (37 * hash) + IDENTIFIER_FIELD_NUMBER;
        hash = (53 * hash) + getIdentifier().hashCode();
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto parseFrom(
        com.google.protobuf.ByteString data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto parseFrom(
        com.google.protobuf.ByteString data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto parseFrom(byte[] data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto parseFrom(
        byte[] data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto parseFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto parseDelimitedFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto parseFrom(
        com.google.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto parseFrom(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }

    public static Builder newBuilder() { return Builder.create(); }
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto prototype) {
      return newBuilder().mergeFrom(prototype);
    }
    public Builder toBuilder() { return newBuilder(this); }

    @java.lang.Override
    protected Builder newBuilderForType(
        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * Protobuf type {@code hadoop.hdfs.JournalIdProto}
     */
    public static final class Builder extends
        com.google.protobuf.GeneratedMessage.Builder<Builder>
       implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder {
      public static final com.google.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_JournalIdProto_descriptor;
      }

      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_JournalIdProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }

      private Builder(
          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      private void maybeForceBuilderInitialization() {
        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
        }
      }
      private static Builder create() {
        return new Builder();
      }

      public Builder clear() {
        super.clear();
        identifier_ = "";
        bitField0_ = (bitField0_ & ~0x00000001);
        return this;
      }

      public Builder clone() {
        return create().mergeFrom(buildPartial());
      }

      public com.google.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_JournalIdProto_descriptor;
      }

      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
      }

      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto build() {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto buildPartial() {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto(this);
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
          to_bitField0_ |= 0x00000001;
        }
        result.identifier_ = identifier_;
        result.bitField0_ = to_bitField0_;
        onBuilt();
        return result;
      }

      public Builder mergeFrom(com.google.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto) {
          return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto other) {
        if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance()) return this;
        if (other.hasIdentifier()) {
          bitField0_ |= 0x00000001;
          identifier_ = other.identifier_;
          onChanged();
        }
        this.mergeUnknownFields(other.getUnknownFields());
        return this;
      }

      public final boolean isInitialized() {
        if (!hasIdentifier()) {

          return false;
        }
        return true;
      }

      public Builder mergeFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto parsedMessage = null;
        try {
          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
          parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto) e.getUnfinishedMessage();
          throw e;
        } finally {
          if (parsedMessage != null) {
            mergeFrom(parsedMessage);
          }
        }
        return this;
      }
      private int bitField0_;

      // required string identifier = 1;
      private java.lang.Object identifier_ = "";
      /**
       * <code>required string identifier = 1;</code>
       */
      public boolean hasIdentifier() {
        return ((bitField0_ & 0x00000001) == 0x00000001);
      }
      /**
       * <code>required string identifier = 1;</code>
       */
      public java.lang.String getIdentifier() {
        java.lang.Object ref = identifier_;
        if (!(ref instanceof java.lang.String)) {
          java.lang.String s = ((com.google.protobuf.ByteString) ref)
              .toStringUtf8();
          identifier_ = s;
          return s;
        } else {
          return (java.lang.String) ref;
        }
      }
      /**
       * <code>required string identifier = 1;</code>
       */
      public com.google.protobuf.ByteString
          getIdentifierBytes() {
        java.lang.Object ref = identifier_;
        if (ref instanceof String) {
          com.google.protobuf.ByteString b =
              com.google.protobuf.ByteString.copyFromUtf8(
                  (java.lang.String) ref);
          identifier_ = b;
          return b;
        } else {
          return (com.google.protobuf.ByteString) ref;
        }
      }
      /**
       * <code>required string identifier = 1;</code>
       */
      public Builder setIdentifier(
          java.lang.String value) {
        if (value == null) {
          throw new NullPointerException();
        }
        bitField0_ |= 0x00000001;
        identifier_ = value;
        onChanged();
        return this;
      }
      /**
       * <code>required string identifier = 1;</code>
       */
      public Builder clearIdentifier() {
        bitField0_ = (bitField0_ & ~0x00000001);
        identifier_ = getDefaultInstance().getIdentifier();
        onChanged();
        return this;
      }
      /**
       * <code>required string identifier = 1;</code>
       */
      public Builder setIdentifierBytes(
          com.google.protobuf.ByteString value) {
        if (value == null) {
          throw new NullPointerException();
        }
        bitField0_ |= 0x00000001;
        identifier_ = value;
        onChanged();
        return this;
      }

      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.JournalIdProto)
    }

    static {
      defaultInstance = new JournalIdProto(true);
      defaultInstance.initFields();
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.JournalIdProto)
  }
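  /*
   * Illustrative usage sketch (editorial addition, not produced by protoc):
   * building, serializing, and re-parsing a JournalIdProto via the generated
   * API above. The journal name is a made-up example value.
   *
   *   JournalIdProto id = JournalIdProto.newBuilder()
   *       .setIdentifier("edits-journal")  // required; build() throws if unset
   *       .build();
   *   byte[] wire = id.toByteArray();
   *   JournalIdProto roundTripped = JournalIdProto.parseFrom(wire);
   *   assert roundTripped.getIdentifier().equals("edits-journal");
   */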

  public interface RequestInfoProtoOrBuilder
      extends com.google.protobuf.MessageOrBuilder {

    // required .hadoop.hdfs.JournalIdProto journalId = 1;
    /**
     * <code>required .hadoop.hdfs.JournalIdProto journalId = 1;</code>
     */
    boolean hasJournalId();
    /**
     * <code>required .hadoop.hdfs.JournalIdProto journalId = 1;</code>
     */
    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJournalId();
    /**
     * <code>required .hadoop.hdfs.JournalIdProto journalId = 1;</code>
     */
    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJournalIdOrBuilder();

    // required uint64 epoch = 2;
    /**
     * <code>required uint64 epoch = 2;</code>
     */
    boolean hasEpoch();
    /**
     * <code>required uint64 epoch = 2;</code>
     */
    long getEpoch();

    // required uint64 ipcSerialNumber = 3;
    /**
     * <code>required uint64 ipcSerialNumber = 3;</code>
     */
    boolean hasIpcSerialNumber();
    /**
     * <code>required uint64 ipcSerialNumber = 3;</code>
     */
    long getIpcSerialNumber();

    // optional uint64 committedTxId = 4;
    /**
     * <code>optional uint64 committedTxId = 4;</code>
     *
     * <pre>
     * Whenever a writer makes a request, it informs
     * the node of the latest committed txid. This may
     * be higher than the transaction data included in the
     * request itself, e.g., in the case that the node has
     * fallen behind.
     * </pre>
     */
    boolean hasCommittedTxId();
    /**
     * <code>optional uint64 committedTxId = 4;</code>
     *
     * <pre>
     * Whenever a writer makes a request, it informs
     * the node of the latest committed txid. This may
     * be higher than the transaction data included in the
     * request itself, e.g., in the case that the node has
     * fallen behind.
     * </pre>
     */
    long getCommittedTxId();
  }
  /**
   * Protobuf type {@code hadoop.hdfs.RequestInfoProto}
   */
  public static final class RequestInfoProto extends
      com.google.protobuf.GeneratedMessage
      implements RequestInfoProtoOrBuilder {
    // Use RequestInfoProto.newBuilder() to construct.
    private RequestInfoProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
      super(builder);
      this.unknownFields = builder.getUnknownFields();
    }
    private RequestInfoProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }

    private static final RequestInfoProto defaultInstance;
    public static RequestInfoProto getDefaultInstance() {
      return defaultInstance;
    }

    public RequestInfoProto getDefaultInstanceForType() {
      return defaultInstance;
    }

    private final com.google.protobuf.UnknownFieldSet unknownFields;
    @java.lang.Override
    public final com.google.protobuf.UnknownFieldSet
        getUnknownFields() {
      return this.unknownFields;
    }
    private RequestInfoProto(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      initFields();
      int mutable_bitField0_ = 0;
      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
          com.google.protobuf.UnknownFieldSet.newBuilder();
      try {
        boolean done = false;
        while (!done) {
          int tag = input.readTag();
          switch (tag) {
            case 0:
              done = true;
              break;
            default: {
              if (!parseUnknownField(input, unknownFields,
                                     extensionRegistry, tag)) {
                done = true;
              }
              break;
            }
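            // Editorial note: tag 10 = field 1 (journalId, length-delimited);
            // tags 16, 24, and 32 = fields 2-4 (epoch, ipcSerialNumber,
            // committedTxId), each a varint.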
            case 10: {
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder subBuilder = null;
              if (((bitField0_ & 0x00000001) == 0x00000001)) {
                subBuilder = journalId_.toBuilder();
              }
              journalId_ = input.readMessage(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.PARSER, extensionRegistry);
              if (subBuilder != null) {
                subBuilder.mergeFrom(journalId_);
                journalId_ = subBuilder.buildPartial();
              }
              bitField0_ |= 0x00000001;
              break;
            }
            case 16: {
              bitField0_ |= 0x00000002;
              epoch_ = input.readUInt64();
              break;
            }
            case 24: {
              bitField0_ |= 0x00000004;
              ipcSerialNumber_ = input.readUInt64();
              break;
            }
            case 32: {
              bitField0_ |= 0x00000008;
              committedTxId_ = input.readUInt64();
              break;
            }
          }
        }
      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
        throw e.setUnfinishedMessage(this);
      } catch (java.io.IOException e) {
        throw new com.google.protobuf.InvalidProtocolBufferException(
            e.getMessage()).setUnfinishedMessage(this);
      } finally {
        this.unknownFields = unknownFields.build();
        makeExtensionsImmutable();
      }
    }
    public static final com.google.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_RequestInfoProto_descriptor;
    }

    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_RequestInfoProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder.class);
    }

    public static com.google.protobuf.Parser<RequestInfoProto> PARSER =
        new com.google.protobuf.AbstractParser<RequestInfoProto>() {
      public RequestInfoProto parsePartialFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws com.google.protobuf.InvalidProtocolBufferException {
        return new RequestInfoProto(input, extensionRegistry);
      }
    };

    @java.lang.Override
    public com.google.protobuf.Parser<RequestInfoProto> getParserForType() {
      return PARSER;
    }

    private int bitField0_;
    // required .hadoop.hdfs.JournalIdProto journalId = 1;
    public static final int JOURNALID_FIELD_NUMBER = 1;
    private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto journalId_;
    /**
     * <code>required .hadoop.hdfs.JournalIdProto journalId = 1;</code>
     */
    public boolean hasJournalId() {
      return ((bitField0_ & 0x00000001) == 0x00000001);
    }
    /**
     * <code>required .hadoop.hdfs.JournalIdProto journalId = 1;</code>
     */
    public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJournalId() {
      return journalId_;
    }
    /**
     * <code>required .hadoop.hdfs.JournalIdProto journalId = 1;</code>
     */
    public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJournalIdOrBuilder() {
      return journalId_;
    }

    // required uint64 epoch = 2;
    public static final int EPOCH_FIELD_NUMBER = 2;
    private long epoch_;
    /**
     * <code>required uint64 epoch = 2;</code>
     */
    public boolean hasEpoch() {
      return ((bitField0_ & 0x00000002) == 0x00000002);
    }
    /**
     * <code>required uint64 epoch = 2;</code>
     */
    public long getEpoch() {
      return epoch_;
    }

    // required uint64 ipcSerialNumber = 3;
    public static final int IPCSERIALNUMBER_FIELD_NUMBER = 3;
    private long ipcSerialNumber_;
    /**
     * <code>required uint64 ipcSerialNumber = 3;</code>
     */
    public boolean hasIpcSerialNumber() {
      return ((bitField0_ & 0x00000004) == 0x00000004);
    }
    /**
     * <code>required uint64 ipcSerialNumber = 3;</code>
     */
    public long getIpcSerialNumber() {
      return ipcSerialNumber_;
    }

    // optional uint64 committedTxId = 4;
    public static final int COMMITTEDTXID_FIELD_NUMBER = 4;
    private long committedTxId_;
    /**
     * <code>optional uint64 committedTxId = 4;</code>
     *
     * <pre>
     * Whenever a writer makes a request, it informs
     * the node of the latest committed txid. This may
     * be higher than the transaction data included in the
     * request itself, e.g., in the case that the node has
     * fallen behind.
     * </pre>
     */
    public boolean hasCommittedTxId() {
      return ((bitField0_ & 0x00000008) == 0x00000008);
    }
    /**
     * <code>optional uint64 committedTxId = 4;</code>
     *
     * <pre>
     * Whenever a writer makes a request, it informs
     * the node of the latest committed txid. This may
     * be higher than the transaction data included in the
     * request itself, e.g., in the case that the node has
     * fallen behind.
     * </pre>
     */
    public long getCommittedTxId() {
      return committedTxId_;
    }
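    /*
     * Illustrative sketch (editorial addition, not produced by protoc): a
     * writer attaches its identity and ordering metadata to every request,
     * and may piggyback the latest committed txid as described in the field
     * comment above. All values here are made-up examples.
     *
     *   RequestInfoProto req = RequestInfoProto.newBuilder()
     *       .setJournalId(JournalIdProto.newBuilder()
     *           .setIdentifier("edits-journal"))
     *       .setEpoch(4L)              // writer's epoch number
     *       .setIpcSerialNumber(17L)   // per-epoch request ordering
     *       .setCommittedTxId(9000L)   // optional; may exceed txids in this request
     *       .build();
     */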
795    
796        private void initFields() {
797          journalId_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
798          epoch_ = 0L;
799          ipcSerialNumber_ = 0L;
800          committedTxId_ = 0L;
801        }
802        private byte memoizedIsInitialized = -1;
803        public final boolean isInitialized() {
804          byte isInitialized = memoizedIsInitialized;
805          if (isInitialized != -1) return isInitialized == 1;
806    
807          if (!hasJournalId()) {
808            memoizedIsInitialized = 0;
809            return false;
810          }
811          if (!hasEpoch()) {
812            memoizedIsInitialized = 0;
813            return false;
814          }
815          if (!hasIpcSerialNumber()) {
816            memoizedIsInitialized = 0;
817            return false;
818          }
819          if (!getJournalId().isInitialized()) {
820            memoizedIsInitialized = 0;
821            return false;
822          }
823          memoizedIsInitialized = 1;
824          return true;
825        }
826    
827        public void writeTo(com.google.protobuf.CodedOutputStream output)
828                            throws java.io.IOException {
829          getSerializedSize();
830          if (((bitField0_ & 0x00000001) == 0x00000001)) {
831            output.writeMessage(1, journalId_);
832          }
833          if (((bitField0_ & 0x00000002) == 0x00000002)) {
834            output.writeUInt64(2, epoch_);
835          }
836          if (((bitField0_ & 0x00000004) == 0x00000004)) {
837            output.writeUInt64(3, ipcSerialNumber_);
838          }
839          if (((bitField0_ & 0x00000008) == 0x00000008)) {
840            output.writeUInt64(4, committedTxId_);
841          }
842          getUnknownFields().writeTo(output);
843        }
844    
845        private int memoizedSerializedSize = -1;
846        public int getSerializedSize() {
847          int size = memoizedSerializedSize;
848          if (size != -1) return size;
849    
850          size = 0;
851          if (((bitField0_ & 0x00000001) == 0x00000001)) {
852            size += com.google.protobuf.CodedOutputStream
853              .computeMessageSize(1, journalId_);
854          }
855          if (((bitField0_ & 0x00000002) == 0x00000002)) {
856            size += com.google.protobuf.CodedOutputStream
857              .computeUInt64Size(2, epoch_);
858          }
859          if (((bitField0_ & 0x00000004) == 0x00000004)) {
860            size += com.google.protobuf.CodedOutputStream
861              .computeUInt64Size(3, ipcSerialNumber_);
862          }
863          if (((bitField0_ & 0x00000008) == 0x00000008)) {
864            size += com.google.protobuf.CodedOutputStream
865              .computeUInt64Size(4, committedTxId_);
866          }
867          size += getUnknownFields().getSerializedSize();
868          memoizedSerializedSize = size;
869          return size;
870        }
871    
872        private static final long serialVersionUID = 0L;
873        @java.lang.Override
874        protected java.lang.Object writeReplace()
875            throws java.io.ObjectStreamException {
876          return super.writeReplace();
877        }
878    
879        @java.lang.Override
880        public boolean equals(final java.lang.Object obj) {
881          if (obj == this) {
882           return true;
883          }
884          if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto)) {
885            return super.equals(obj);
886          }
887          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto) obj;
888    
889          boolean result = true;
890          result = result && (hasJournalId() == other.hasJournalId());
891          if (hasJournalId()) {
892            result = result && getJournalId()
893                .equals(other.getJournalId());
894          }
895          result = result && (hasEpoch() == other.hasEpoch());
896          if (hasEpoch()) {
897            result = result && (getEpoch()
898                == other.getEpoch());
899          }
900          result = result && (hasIpcSerialNumber() == other.hasIpcSerialNumber());
901          if (hasIpcSerialNumber()) {
902            result = result && (getIpcSerialNumber()
903                == other.getIpcSerialNumber());
904          }
905          result = result && (hasCommittedTxId() == other.hasCommittedTxId());
906          if (hasCommittedTxId()) {
907            result = result && (getCommittedTxId()
908                == other.getCommittedTxId());
909          }
910          result = result &&
911              getUnknownFields().equals(other.getUnknownFields());
912          return result;
913        }
914    
915        private int memoizedHashCode = 0;
916        @java.lang.Override
917        public int hashCode() {
918          if (memoizedHashCode != 0) {
919            return memoizedHashCode;
920          }
921          int hash = 41;
922          hash = (19 * hash) + getDescriptorForType().hashCode();
923          if (hasJournalId()) {
924            hash = (37 * hash) + JOURNALID_FIELD_NUMBER;
925            hash = (53 * hash) + getJournalId().hashCode();
926          }
927          if (hasEpoch()) {
928            hash = (37 * hash) + EPOCH_FIELD_NUMBER;
929            hash = (53 * hash) + hashLong(getEpoch());
930          }
931          if (hasIpcSerialNumber()) {
932            hash = (37 * hash) + IPCSERIALNUMBER_FIELD_NUMBER;
933            hash = (53 * hash) + hashLong(getIpcSerialNumber());
934          }
935          if (hasCommittedTxId()) {
936            hash = (37 * hash) + COMMITTEDTXID_FIELD_NUMBER;
937            hash = (53 * hash) + hashLong(getCommittedTxId());
938          }
939          hash = (29 * hash) + getUnknownFields().hashCode();
940          memoizedHashCode = hash;
941          return hash;
942        }
943    
944        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto parseFrom(
945            com.google.protobuf.ByteString data)
946            throws com.google.protobuf.InvalidProtocolBufferException {
947          return PARSER.parseFrom(data);
948        }
949        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto parseFrom(
950            com.google.protobuf.ByteString data,
951            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
952            throws com.google.protobuf.InvalidProtocolBufferException {
953          return PARSER.parseFrom(data, extensionRegistry);
954        }
955        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto parseFrom(byte[] data)
956            throws com.google.protobuf.InvalidProtocolBufferException {
957          return PARSER.parseFrom(data);
958        }
959        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto parseFrom(
960            byte[] data,
961            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
962            throws com.google.protobuf.InvalidProtocolBufferException {
963          return PARSER.parseFrom(data, extensionRegistry);
964        }
965        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto parseFrom(java.io.InputStream input)
966            throws java.io.IOException {
967          return PARSER.parseFrom(input);
968        }
969        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto parseFrom(
970            java.io.InputStream input,
971            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
972            throws java.io.IOException {
973          return PARSER.parseFrom(input, extensionRegistry);
974        }
975        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto parseDelimitedFrom(java.io.InputStream input)
976            throws java.io.IOException {
977          return PARSER.parseDelimitedFrom(input);
978        }
979        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto parseDelimitedFrom(
980            java.io.InputStream input,
981            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
982            throws java.io.IOException {
983          return PARSER.parseDelimitedFrom(input, extensionRegistry);
984        }
985        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto parseFrom(
986            com.google.protobuf.CodedInputStream input)
987            throws java.io.IOException {
988          return PARSER.parseFrom(input);
989        }
990        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto parseFrom(
991            com.google.protobuf.CodedInputStream input,
992            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
993            throws java.io.IOException {
994          return PARSER.parseFrom(input, extensionRegistry);
995        }
996    
997        public static Builder newBuilder() { return Builder.create(); }
998        public Builder newBuilderForType() { return newBuilder(); }
999        public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto prototype) {
1000          return newBuilder().mergeFrom(prototype);
1001        }
1002        public Builder toBuilder() { return newBuilder(this); }
1003    
1004        @java.lang.Override
1005        protected Builder newBuilderForType(
1006            com.google.protobuf.GeneratedMessage.BuilderParent parent) {
1007          Builder builder = new Builder(parent);
1008          return builder;
1009        }
1010        /**
1011         * Protobuf type {@code hadoop.hdfs.RequestInfoProto}
1012         */
1013        public static final class Builder extends
1014            com.google.protobuf.GeneratedMessage.Builder<Builder>
1015           implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder {
1016          public static final com.google.protobuf.Descriptors.Descriptor
1017              getDescriptor() {
1018            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_RequestInfoProto_descriptor;
1019          }
1020    
1021          protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
1022              internalGetFieldAccessorTable() {
1023            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_RequestInfoProto_fieldAccessorTable
1024                .ensureFieldAccessorsInitialized(
1025                    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder.class);
1026          }
1027    
1028          // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.newBuilder()
1029          private Builder() {
1030            maybeForceBuilderInitialization();
1031          }
1032    
1033          private Builder(
1034              com.google.protobuf.GeneratedMessage.BuilderParent parent) {
1035            super(parent);
1036            maybeForceBuilderInitialization();
1037          }
1038          private void maybeForceBuilderInitialization() {
1039            if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
1040              getJournalIdFieldBuilder();
1041            }
1042          }
1043          private static Builder create() {
1044            return new Builder();
1045          }
1046    
1047          public Builder clear() {
1048            super.clear();
1049            if (journalIdBuilder_ == null) {
1050              journalId_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
1051            } else {
1052              journalIdBuilder_.clear();
1053            }
1054            bitField0_ = (bitField0_ & ~0x00000001);
1055            epoch_ = 0L;
1056            bitField0_ = (bitField0_ & ~0x00000002);
1057            ipcSerialNumber_ = 0L;
1058            bitField0_ = (bitField0_ & ~0x00000004);
1059            committedTxId_ = 0L;
1060            bitField0_ = (bitField0_ & ~0x00000008);
1061            return this;
1062          }
1063    
1064          public Builder clone() {
1065            return create().mergeFrom(buildPartial());
1066          }
1067    
1068          public com.google.protobuf.Descriptors.Descriptor
1069              getDescriptorForType() {
1070            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_RequestInfoProto_descriptor;
1071          }
1072    
1073          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getDefaultInstanceForType() {
1074            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
1075          }
1076    
1077          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto build() {
1078            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto result = buildPartial();
1079            if (!result.isInitialized()) {
1080              throw newUninitializedMessageException(result);
1081            }
1082            return result;
1083          }
1084    
1085          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto buildPartial() {
1086            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto(this);
1087            int from_bitField0_ = bitField0_;
1088            int to_bitField0_ = 0;
1089            if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
1090              to_bitField0_ |= 0x00000001;
1091            }
1092            if (journalIdBuilder_ == null) {
1093              result.journalId_ = journalId_;
1094            } else {
1095              result.journalId_ = journalIdBuilder_.build();
1096            }
1097            if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
1098              to_bitField0_ |= 0x00000002;
1099            }
1100            result.epoch_ = epoch_;
1101            if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
1102              to_bitField0_ |= 0x00000004;
1103            }
1104            result.ipcSerialNumber_ = ipcSerialNumber_;
1105            if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
1106              to_bitField0_ |= 0x00000008;
1107            }
1108            result.committedTxId_ = committedTxId_;
1109            result.bitField0_ = to_bitField0_;
1110            onBuilt();
1111            return result;
1112          }
1113    
1114          public Builder mergeFrom(com.google.protobuf.Message other) {
1115            if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto) {
1116              return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto)other);
1117            } else {
1118              super.mergeFrom(other);
1119              return this;
1120            }
1121          }
1122    
1123          public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto other) {
1124            if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance()) return this;
1125            if (other.hasJournalId()) {
1126              mergeJournalId(other.getJournalId());
1127            }
1128            if (other.hasEpoch()) {
1129              setEpoch(other.getEpoch());
1130            }
1131            if (other.hasIpcSerialNumber()) {
1132              setIpcSerialNumber(other.getIpcSerialNumber());
1133            }
1134            if (other.hasCommittedTxId()) {
1135              setCommittedTxId(other.getCommittedTxId());
1136            }
1137            this.mergeUnknownFields(other.getUnknownFields());
1138            return this;
1139          }
1140    
1141          public final boolean isInitialized() {
1142            if (!hasJournalId()) {
1143              
1144              return false;
1145            }
1146            if (!hasEpoch()) {
1147              
1148              return false;
1149            }
1150            if (!hasIpcSerialNumber()) {
1151              
1152              return false;
1153            }
1154            if (!getJournalId().isInitialized()) {
1155              
1156              return false;
1157            }
1158            return true;
1159          }
1160    
1161          public Builder mergeFrom(
1162              com.google.protobuf.CodedInputStream input,
1163              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
1164              throws java.io.IOException {
1165            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto parsedMessage = null;
1166            try {
1167              parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
1168            } catch (com.google.protobuf.InvalidProtocolBufferException e) {
1169              parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto) e.getUnfinishedMessage();
1170              throw e;
1171            } finally {
1172              if (parsedMessage != null) {
1173                mergeFrom(parsedMessage);
1174              }
1175            }
1176            return this;
1177          }
1178          private int bitField0_;
1179    
1180          // required .hadoop.hdfs.JournalIdProto journalId = 1;
1181          private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto journalId_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
1182          private com.google.protobuf.SingleFieldBuilder<
1183              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder> journalIdBuilder_;
1184          /**
1185           * <code>required .hadoop.hdfs.JournalIdProto journalId = 1;</code>
1186           */
1187          public boolean hasJournalId() {
1188            return ((bitField0_ & 0x00000001) == 0x00000001);
1189          }
1190          /**
1191           * <code>required .hadoop.hdfs.JournalIdProto journalId = 1;</code>
1192           */
1193          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJournalId() {
1194            if (journalIdBuilder_ == null) {
1195              return journalId_;
1196            } else {
1197              return journalIdBuilder_.getMessage();
1198            }
1199          }
1200          /**
1201           * <code>required .hadoop.hdfs.JournalIdProto journalId = 1;</code>
1202           */
1203          public Builder setJournalId(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto value) {
1204            if (journalIdBuilder_ == null) {
1205              if (value == null) {
1206                throw new NullPointerException();
1207              }
1208              journalId_ = value;
1209              onChanged();
1210            } else {
1211              journalIdBuilder_.setMessage(value);
1212            }
1213            bitField0_ |= 0x00000001;
1214            return this;
1215          }
1216          /**
1217           * <code>required .hadoop.hdfs.JournalIdProto journalId = 1;</code>
1218           */
1219          public Builder setJournalId(
1220              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder builderForValue) {
1221            if (journalIdBuilder_ == null) {
1222              journalId_ = builderForValue.build();
1223              onChanged();
1224            } else {
1225              journalIdBuilder_.setMessage(builderForValue.build());
1226            }
1227            bitField0_ |= 0x00000001;
1228            return this;
1229          }
1230          /**
1231           * <code>required .hadoop.hdfs.JournalIdProto journalId = 1;</code>
1232           */
1233          public Builder mergeJournalId(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto value) {
1234            if (journalIdBuilder_ == null) {
1235              if (((bitField0_ & 0x00000001) == 0x00000001) &&
1236                  journalId_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance()) {
1237                journalId_ =
1238                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.newBuilder(journalId_).mergeFrom(value).buildPartial();
1239              } else {
1240                journalId_ = value;
1241              }
1242              onChanged();
1243            } else {
1244              journalIdBuilder_.mergeFrom(value);
1245            }
1246            bitField0_ |= 0x00000001;
1247            return this;
1248          }
1249          /**
1250           * <code>required .hadoop.hdfs.JournalIdProto journalId = 1;</code>
1251           */
1252          public Builder clearJournalId() {
1253            if (journalIdBuilder_ == null) {
1254              journalId_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
1255              onChanged();
1256            } else {
1257              journalIdBuilder_.clear();
1258            }
1259            bitField0_ = (bitField0_ & ~0x00000001);
1260            return this;
1261          }
1262          /**
1263           * <code>required .hadoop.hdfs.JournalIdProto journalId = 1;</code>
1264           */
1265          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder getJournalIdBuilder() {
1266            bitField0_ |= 0x00000001;
1267            onChanged();
1268            return getJournalIdFieldBuilder().getBuilder();
1269          }
1270          /**
1271           * <code>required .hadoop.hdfs.JournalIdProto journalId = 1;</code>
1272           */
1273          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJournalIdOrBuilder() {
1274            if (journalIdBuilder_ != null) {
1275              return journalIdBuilder_.getMessageOrBuilder();
1276            } else {
1277              return journalId_;
1278            }
1279          }
1280          /**
1281           * <code>required .hadoop.hdfs.JournalIdProto journalId = 1;</code>
1282           */
1283          private com.google.protobuf.SingleFieldBuilder<
1284              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder> 
1285              getJournalIdFieldBuilder() {
1286            if (journalIdBuilder_ == null) {
1287              journalIdBuilder_ = new com.google.protobuf.SingleFieldBuilder<
1288                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder>(
1289                      journalId_,
1290                      getParentForChildren(),
1291                      isClean());
1292              journalId_ = null;
1293            }
1294            return journalIdBuilder_;
1295          }
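      // Note: the SingleFieldBuilder above is created lazily. Until the
      // first call to getJournalIdFieldBuilder(), this Builder stores the
      // message directly in journalId_; after that call, all reads and
      // writes go through journalIdBuilder_ and journalId_ is set to null.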
1296    
1297          // required uint64 epoch = 2;
1298          private long epoch_ ;
1299          /**
1300           * <code>required uint64 epoch = 2;</code>
1301           */
1302          public boolean hasEpoch() {
1303            return ((bitField0_ & 0x00000002) == 0x00000002);
1304          }
1305          /**
1306           * <code>required uint64 epoch = 2;</code>
1307           */
1308          public long getEpoch() {
1309            return epoch_;
1310          }
1311          /**
1312           * <code>required uint64 epoch = 2;</code>
1313           */
1314          public Builder setEpoch(long value) {
1315            bitField0_ |= 0x00000002;
1316            epoch_ = value;
1317            onChanged();
1318            return this;
1319          }
1320          /**
1321           * <code>required uint64 epoch = 2;</code>
1322           */
1323          public Builder clearEpoch() {
1324            bitField0_ = (bitField0_ & ~0x00000002);
1325            epoch_ = 0L;
1326            onChanged();
1327            return this;
1328          }
1329    
1330          // required uint64 ipcSerialNumber = 3;
1331          private long ipcSerialNumber_ ;
1332          /**
1333           * <code>required uint64 ipcSerialNumber = 3;</code>
1334           */
1335          public boolean hasIpcSerialNumber() {
1336            return ((bitField0_ & 0x00000004) == 0x00000004);
1337          }
1338          /**
1339           * <code>required uint64 ipcSerialNumber = 3;</code>
1340           */
1341          public long getIpcSerialNumber() {
1342            return ipcSerialNumber_;
1343          }
1344          /**
1345           * <code>required uint64 ipcSerialNumber = 3;</code>
1346           */
1347          public Builder setIpcSerialNumber(long value) {
1348            bitField0_ |= 0x00000004;
1349            ipcSerialNumber_ = value;
1350            onChanged();
1351            return this;
1352          }
1353          /**
1354           * <code>required uint64 ipcSerialNumber = 3;</code>
1355           */
1356          public Builder clearIpcSerialNumber() {
1357            bitField0_ = (bitField0_ & ~0x00000004);
1358            ipcSerialNumber_ = 0L;
1359            onChanged();
1360            return this;
1361          }
1362    
1363          // optional uint64 committedTxId = 4;
1364          private long committedTxId_ ;
1365          /**
1366           * <code>optional uint64 committedTxId = 4;</code>
1367           *
1368           * <pre>
1369           * Whenever a writer makes a request, it informs
1370           * the node of the latest committed txid. This may
1371           * be higher than the transaction data included in the
       * request itself, e.g. in the case that the node has
1373           * fallen behind.
1374           * </pre>
1375           */
1376          public boolean hasCommittedTxId() {
1377            return ((bitField0_ & 0x00000008) == 0x00000008);
1378          }
1379          /**
1380           * <code>optional uint64 committedTxId = 4;</code>
1381           *
1382           * <pre>
1383           * Whenever a writer makes a request, it informs
1384           * the node of the latest committed txid. This may
1385           * be higher than the transaction data included in the
       * request itself, e.g. in the case that the node has
1387           * fallen behind.
1388           * </pre>
1389           */
1390          public long getCommittedTxId() {
1391            return committedTxId_;
1392          }
1393          /**
1394           * <code>optional uint64 committedTxId = 4;</code>
1395           *
1396           * <pre>
1397           * Whenever a writer makes a request, it informs
1398           * the node of the latest committed txid. This may
1399           * be higher than the transaction data included in the
       * request itself, e.g. in the case that the node has
1401           * fallen behind.
1402           * </pre>
1403           */
1404          public Builder setCommittedTxId(long value) {
1405            bitField0_ |= 0x00000008;
1406            committedTxId_ = value;
1407            onChanged();
1408            return this;
1409          }
1410          /**
1411           * <code>optional uint64 committedTxId = 4;</code>
1412           *
1413           * <pre>
1414           * Whenever a writer makes a request, it informs
1415           * the node of the latest committed txid. This may
1416           * be higher than the transaction data included in the
       * request itself, e.g. in the case that the node has
1418           * fallen behind.
1419           * </pre>
1420           */
1421          public Builder clearCommittedTxId() {
1422            bitField0_ = (bitField0_ & ~0x00000008);
1423            committedTxId_ = 0L;
1424            onChanged();
1425            return this;
1426          }
1427    
1428          // @@protoc_insertion_point(builder_scope:hadoop.hdfs.RequestInfoProto)
1429        }
1430    
1431        static {
1432          defaultInstance = new RequestInfoProto(true);
1433          defaultInstance.initFields();
1434        }
1435    
1436        // @@protoc_insertion_point(class_scope:hadoop.hdfs.RequestInfoProto)
1437      }
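  // A minimal usage sketch for RequestInfoProto (illustrative only; the
  // journal identifier and counter values below are placeholders):
  //
  //   QJournalProtocolProtos.RequestInfoProto reqInfo =
  //       QJournalProtocolProtos.RequestInfoProto.newBuilder()
  //           .setJournalId(QJournalProtocolProtos.JournalIdProto.newBuilder()
  //               .setIdentifier("example-journal"))
  //           .setEpoch(3L)
  //           .setIpcSerialNumber(7L)
  //           .setCommittedTxId(1000L)  // optional; may be omitted
  //           .build();
  //
  // build() throws an UninitializedMessageException if any required field
  // (journalId, epoch, or ipcSerialNumber) is unset.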
1438    
1439      public interface SegmentStateProtoOrBuilder
1440          extends com.google.protobuf.MessageOrBuilder {
1441    
1442        // required uint64 startTxId = 1;
1443        /**
1444         * <code>required uint64 startTxId = 1;</code>
1445         */
1446        boolean hasStartTxId();
1447        /**
1448         * <code>required uint64 startTxId = 1;</code>
1449         */
1450        long getStartTxId();
1451    
1452        // required uint64 endTxId = 2;
1453        /**
1454         * <code>required uint64 endTxId = 2;</code>
1455         */
1456        boolean hasEndTxId();
1457        /**
1458         * <code>required uint64 endTxId = 2;</code>
1459         */
1460        long getEndTxId();
1461    
1462        // required bool isInProgress = 3;
1463        /**
1464         * <code>required bool isInProgress = 3;</code>
1465         */
1466        boolean hasIsInProgress();
1467        /**
1468         * <code>required bool isInProgress = 3;</code>
1469         */
1470        boolean getIsInProgress();
1471      }
1472      /**
1473       * Protobuf type {@code hadoop.hdfs.SegmentStateProto}
1474       */
1475      public static final class SegmentStateProto extends
1476          com.google.protobuf.GeneratedMessage
1477          implements SegmentStateProtoOrBuilder {
1478        // Use SegmentStateProto.newBuilder() to construct.
1479        private SegmentStateProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
1480          super(builder);
1481          this.unknownFields = builder.getUnknownFields();
1482        }
1483        private SegmentStateProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
1484    
1485        private static final SegmentStateProto defaultInstance;
1486        public static SegmentStateProto getDefaultInstance() {
1487          return defaultInstance;
1488        }
1489    
1490        public SegmentStateProto getDefaultInstanceForType() {
1491          return defaultInstance;
1492        }
1493    
1494        private final com.google.protobuf.UnknownFieldSet unknownFields;
1495        @java.lang.Override
1496        public final com.google.protobuf.UnknownFieldSet
1497            getUnknownFields() {
1498          return this.unknownFields;
1499        }
1500        private SegmentStateProto(
1501            com.google.protobuf.CodedInputStream input,
1502            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
1503            throws com.google.protobuf.InvalidProtocolBufferException {
1504          initFields();
1505          int mutable_bitField0_ = 0;
1506          com.google.protobuf.UnknownFieldSet.Builder unknownFields =
1507              com.google.protobuf.UnknownFieldSet.newBuilder();
1508          try {
1509            boolean done = false;
1510            while (!done) {
1511              int tag = input.readTag();
1512              switch (tag) {
1513                case 0:
1514                  done = true;
1515                  break;
1516                default: {
1517                  if (!parseUnknownField(input, unknownFields,
1518                                         extensionRegistry, tag)) {
1519                    done = true;
1520                  }
1521                  break;
1522                }
1523                case 8: {
1524                  bitField0_ |= 0x00000001;
1525                  startTxId_ = input.readUInt64();
1526                  break;
1527                }
1528                case 16: {
1529                  bitField0_ |= 0x00000002;
1530                  endTxId_ = input.readUInt64();
1531                  break;
1532                }
1533                case 24: {
1534                  bitField0_ |= 0x00000004;
1535                  isInProgress_ = input.readBool();
1536                  break;
1537                }
1538              }
1539            }
1540          } catch (com.google.protobuf.InvalidProtocolBufferException e) {
1541            throw e.setUnfinishedMessage(this);
1542          } catch (java.io.IOException e) {
1543            throw new com.google.protobuf.InvalidProtocolBufferException(
1544                e.getMessage()).setUnfinishedMessage(this);
1545          } finally {
1546            this.unknownFields = unknownFields.build();
1547            makeExtensionsImmutable();
1548          }
1549        }
1550        public static final com.google.protobuf.Descriptors.Descriptor
1551            getDescriptor() {
1552          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_SegmentStateProto_descriptor;
1553        }
1554    
1555        protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
1556            internalGetFieldAccessorTable() {
1557          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_SegmentStateProto_fieldAccessorTable
1558              .ensureFieldAccessorsInitialized(
1559                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder.class);
1560        }
1561    
1562        public static com.google.protobuf.Parser<SegmentStateProto> PARSER =
1563            new com.google.protobuf.AbstractParser<SegmentStateProto>() {
1564          public SegmentStateProto parsePartialFrom(
1565              com.google.protobuf.CodedInputStream input,
1566              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
1567              throws com.google.protobuf.InvalidProtocolBufferException {
1568            return new SegmentStateProto(input, extensionRegistry);
1569          }
1570        };
1571    
1572        @java.lang.Override
1573        public com.google.protobuf.Parser<SegmentStateProto> getParserForType() {
1574          return PARSER;
1575        }
1576    
1577        private int bitField0_;
1578        // required uint64 startTxId = 1;
1579        public static final int STARTTXID_FIELD_NUMBER = 1;
1580        private long startTxId_;
1581        /**
1582         * <code>required uint64 startTxId = 1;</code>
1583         */
1584        public boolean hasStartTxId() {
1585          return ((bitField0_ & 0x00000001) == 0x00000001);
1586        }
1587        /**
1588         * <code>required uint64 startTxId = 1;</code>
1589         */
1590        public long getStartTxId() {
1591          return startTxId_;
1592        }
1593    
1594        // required uint64 endTxId = 2;
1595        public static final int ENDTXID_FIELD_NUMBER = 2;
1596        private long endTxId_;
1597        /**
1598         * <code>required uint64 endTxId = 2;</code>
1599         */
1600        public boolean hasEndTxId() {
1601          return ((bitField0_ & 0x00000002) == 0x00000002);
1602        }
1603        /**
1604         * <code>required uint64 endTxId = 2;</code>
1605         */
1606        public long getEndTxId() {
1607          return endTxId_;
1608        }
1609    
1610        // required bool isInProgress = 3;
1611        public static final int ISINPROGRESS_FIELD_NUMBER = 3;
1612        private boolean isInProgress_;
1613        /**
1614         * <code>required bool isInProgress = 3;</code>
1615         */
1616        public boolean hasIsInProgress() {
1617          return ((bitField0_ & 0x00000004) == 0x00000004);
1618        }
1619        /**
1620         * <code>required bool isInProgress = 3;</code>
1621         */
1622        public boolean getIsInProgress() {
1623          return isInProgress_;
1624        }
1625    
1626        private void initFields() {
1627          startTxId_ = 0L;
1628          endTxId_ = 0L;
1629          isInProgress_ = false;
1630        }
1631        private byte memoizedIsInitialized = -1;
1632        public final boolean isInitialized() {
1633          byte isInitialized = memoizedIsInitialized;
1634          if (isInitialized != -1) return isInitialized == 1;
1635    
1636          if (!hasStartTxId()) {
1637            memoizedIsInitialized = 0;
1638            return false;
1639          }
1640          if (!hasEndTxId()) {
1641            memoizedIsInitialized = 0;
1642            return false;
1643          }
1644          if (!hasIsInProgress()) {
1645            memoizedIsInitialized = 0;
1646            return false;
1647          }
1648          memoizedIsInitialized = 1;
1649          return true;
1650        }
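    // memoizedIsInitialized caches the result of the check above:
    // -1 = not yet computed, 0 = a required field is missing,
    // 1 = fully initialized. The cache is safe because a message is
    // immutable once constructed.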
1651    
1652        public void writeTo(com.google.protobuf.CodedOutputStream output)
1653                            throws java.io.IOException {
1654          getSerializedSize();
1655          if (((bitField0_ & 0x00000001) == 0x00000001)) {
1656            output.writeUInt64(1, startTxId_);
1657          }
1658          if (((bitField0_ & 0x00000002) == 0x00000002)) {
1659            output.writeUInt64(2, endTxId_);
1660          }
1661          if (((bitField0_ & 0x00000004) == 0x00000004)) {
1662            output.writeBool(3, isInProgress_);
1663          }
1664          getUnknownFields().writeTo(output);
1665        }
1666    
1667        private int memoizedSerializedSize = -1;
1668        public int getSerializedSize() {
1669          int size = memoizedSerializedSize;
1670          if (size != -1) return size;
1671    
1672          size = 0;
1673          if (((bitField0_ & 0x00000001) == 0x00000001)) {
1674            size += com.google.protobuf.CodedOutputStream
1675              .computeUInt64Size(1, startTxId_);
1676          }
1677          if (((bitField0_ & 0x00000002) == 0x00000002)) {
1678            size += com.google.protobuf.CodedOutputStream
1679              .computeUInt64Size(2, endTxId_);
1680          }
1681          if (((bitField0_ & 0x00000004) == 0x00000004)) {
1682            size += com.google.protobuf.CodedOutputStream
1683              .computeBoolSize(3, isInProgress_);
1684          }
1685          size += getUnknownFields().getSerializedSize();
1686          memoizedSerializedSize = size;
1687          return size;
1688        }
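    // The serialized size is memoized the same way. writeTo() calls
    // getSerializedSize() before writing so the cached value is populated
    // up front; for messages with embedded message fields, those cached
    // sizes are what CodedOutputStream uses when writing length prefixes.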
1689    
1690        private static final long serialVersionUID = 0L;
1691        @java.lang.Override
1692        protected java.lang.Object writeReplace()
1693            throws java.io.ObjectStreamException {
1694          return super.writeReplace();
1695        }
1696    
1697        @java.lang.Override
1698        public boolean equals(final java.lang.Object obj) {
1699          if (obj == this) {
        return true;
1701          }
1702          if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto)) {
1703            return super.equals(obj);
1704          }
1705          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto) obj;
1706    
1707          boolean result = true;
1708          result = result && (hasStartTxId() == other.hasStartTxId());
1709          if (hasStartTxId()) {
1710            result = result && (getStartTxId()
1711                == other.getStartTxId());
1712          }
1713          result = result && (hasEndTxId() == other.hasEndTxId());
1714          if (hasEndTxId()) {
1715            result = result && (getEndTxId()
1716                == other.getEndTxId());
1717          }
1718          result = result && (hasIsInProgress() == other.hasIsInProgress());
1719          if (hasIsInProgress()) {
1720            result = result && (getIsInProgress()
1721                == other.getIsInProgress());
1722          }
1723          result = result &&
1724              getUnknownFields().equals(other.getUnknownFields());
1725          return result;
1726        }
1727    
1728        private int memoizedHashCode = 0;
1729        @java.lang.Override
1730        public int hashCode() {
1731          if (memoizedHashCode != 0) {
1732            return memoizedHashCode;
1733          }
1734          int hash = 41;
1735          hash = (19 * hash) + getDescriptorForType().hashCode();
1736          if (hasStartTxId()) {
1737            hash = (37 * hash) + STARTTXID_FIELD_NUMBER;
1738            hash = (53 * hash) + hashLong(getStartTxId());
1739          }
1740          if (hasEndTxId()) {
1741            hash = (37 * hash) + ENDTXID_FIELD_NUMBER;
1742            hash = (53 * hash) + hashLong(getEndTxId());
1743          }
1744          if (hasIsInProgress()) {
1745            hash = (37 * hash) + ISINPROGRESS_FIELD_NUMBER;
1746            hash = (53 * hash) + hashBoolean(getIsInProgress());
1747          }
1748          hash = (29 * hash) + getUnknownFields().hashCode();
1749          memoizedHashCode = hash;
1750          return hash;
1751        }
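    // The hash code mixes each present field's number and value with the
    // primes 37 and 53, seeded by the descriptor's hash, and folds in the
    // unknown fields; it is memoized since the message never changes.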
1752    
1753        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto parseFrom(
1754            com.google.protobuf.ByteString data)
1755            throws com.google.protobuf.InvalidProtocolBufferException {
1756          return PARSER.parseFrom(data);
1757        }
1758        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto parseFrom(
1759            com.google.protobuf.ByteString data,
1760            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
1761            throws com.google.protobuf.InvalidProtocolBufferException {
1762          return PARSER.parseFrom(data, extensionRegistry);
1763        }
1764        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto parseFrom(byte[] data)
1765            throws com.google.protobuf.InvalidProtocolBufferException {
1766          return PARSER.parseFrom(data);
1767        }
1768        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto parseFrom(
1769            byte[] data,
1770            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
1771            throws com.google.protobuf.InvalidProtocolBufferException {
1772          return PARSER.parseFrom(data, extensionRegistry);
1773        }
1774        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto parseFrom(java.io.InputStream input)
1775            throws java.io.IOException {
1776          return PARSER.parseFrom(input);
1777        }
1778        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto parseFrom(
1779            java.io.InputStream input,
1780            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
1781            throws java.io.IOException {
1782          return PARSER.parseFrom(input, extensionRegistry);
1783        }
1784        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto parseDelimitedFrom(java.io.InputStream input)
1785            throws java.io.IOException {
1786          return PARSER.parseDelimitedFrom(input);
1787        }
1788        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto parseDelimitedFrom(
1789            java.io.InputStream input,
1790            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
1791            throws java.io.IOException {
1792          return PARSER.parseDelimitedFrom(input, extensionRegistry);
1793        }
1794        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto parseFrom(
1795            com.google.protobuf.CodedInputStream input)
1796            throws java.io.IOException {
1797          return PARSER.parseFrom(input);
1798        }
1799        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto parseFrom(
1800            com.google.protobuf.CodedInputStream input,
1801            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
1802            throws java.io.IOException {
1803          return PARSER.parseFrom(input, extensionRegistry);
1804        }
1805    
1806        public static Builder newBuilder() { return Builder.create(); }
1807        public Builder newBuilderForType() { return newBuilder(); }
1808        public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto prototype) {
1809          return newBuilder().mergeFrom(prototype);
1810        }
1811        public Builder toBuilder() { return newBuilder(this); }
1812    
1813        @java.lang.Override
1814        protected Builder newBuilderForType(
1815            com.google.protobuf.GeneratedMessage.BuilderParent parent) {
1816          Builder builder = new Builder(parent);
1817          return builder;
1818        }
1819        /**
1820         * Protobuf type {@code hadoop.hdfs.SegmentStateProto}
1821         */
1822        public static final class Builder extends
1823            com.google.protobuf.GeneratedMessage.Builder<Builder>
1824           implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder {
1825          public static final com.google.protobuf.Descriptors.Descriptor
1826              getDescriptor() {
1827            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_SegmentStateProto_descriptor;
1828          }
1829    
1830          protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
1831              internalGetFieldAccessorTable() {
1832            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_SegmentStateProto_fieldAccessorTable
1833                .ensureFieldAccessorsInitialized(
1834                    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder.class);
1835          }
1836    
1837          // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.newBuilder()
1838          private Builder() {
1839            maybeForceBuilderInitialization();
1840          }
1841    
1842          private Builder(
1843              com.google.protobuf.GeneratedMessage.BuilderParent parent) {
1844            super(parent);
1845            maybeForceBuilderInitialization();
1846          }
1847          private void maybeForceBuilderInitialization() {
1848            if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
1849            }
1850          }
1851          private static Builder create() {
1852            return new Builder();
1853          }
1854    
1855          public Builder clear() {
1856            super.clear();
1857            startTxId_ = 0L;
1858            bitField0_ = (bitField0_ & ~0x00000001);
1859            endTxId_ = 0L;
1860            bitField0_ = (bitField0_ & ~0x00000002);
1861            isInProgress_ = false;
1862            bitField0_ = (bitField0_ & ~0x00000004);
1863            return this;
1864          }
1865    
1866          public Builder clone() {
1867            return create().mergeFrom(buildPartial());
1868          }
1869    
1870          public com.google.protobuf.Descriptors.Descriptor
1871              getDescriptorForType() {
1872            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_SegmentStateProto_descriptor;
1873          }
1874    
1875          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto getDefaultInstanceForType() {
1876            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance();
1877          }
1878    
1879          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto build() {
1880            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto result = buildPartial();
1881            if (!result.isInitialized()) {
1882              throw newUninitializedMessageException(result);
1883            }
1884            return result;
1885          }
1886    
1887          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto buildPartial() {
1888            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto(this);
1889            int from_bitField0_ = bitField0_;
1890            int to_bitField0_ = 0;
1891            if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
1892              to_bitField0_ |= 0x00000001;
1893            }
1894            result.startTxId_ = startTxId_;
1895            if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
1896              to_bitField0_ |= 0x00000002;
1897            }
1898            result.endTxId_ = endTxId_;
1899            if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
1900              to_bitField0_ |= 0x00000004;
1901            }
1902            result.isInProgress_ = isInProgress_;
1903            result.bitField0_ = to_bitField0_;
1904            onBuilt();
1905            return result;
1906          }
1907    
1908          public Builder mergeFrom(com.google.protobuf.Message other) {
1909            if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto) {
1910              return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto)other);
1911            } else {
1912              super.mergeFrom(other);
1913              return this;
1914            }
1915          }
1916    
1917          public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto other) {
1918            if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance()) return this;
1919            if (other.hasStartTxId()) {
1920              setStartTxId(other.getStartTxId());
1921            }
1922            if (other.hasEndTxId()) {
1923              setEndTxId(other.getEndTxId());
1924            }
1925            if (other.hasIsInProgress()) {
1926              setIsInProgress(other.getIsInProgress());
1927            }
1928            this.mergeUnknownFields(other.getUnknownFields());
1929            return this;
1930          }
1931    
      public final boolean isInitialized() {
        if (!hasStartTxId()) {
          return false;
        }
        if (!hasEndTxId()) {
          return false;
        }
        if (!hasIsInProgress()) {
          return false;
        }
        return true;
      }
1947    
1948          public Builder mergeFrom(
1949              com.google.protobuf.CodedInputStream input,
1950              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
1951              throws java.io.IOException {
1952            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto parsedMessage = null;
1953            try {
1954              parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
1955            } catch (com.google.protobuf.InvalidProtocolBufferException e) {
1956              parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto) e.getUnfinishedMessage();
1957              throw e;
1958            } finally {
1959              if (parsedMessage != null) {
1960                mergeFrom(parsedMessage);
1961              }
1962            }
1963            return this;
1964          }
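      // Even if parsing fails partway through, the finally block above
      // merges whatever was successfully read into this Builder before the
      // exception propagates, so partial data is not silently dropped.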
1965          private int bitField0_;
1966    
1967          // required uint64 startTxId = 1;
1968          private long startTxId_ ;
1969          /**
1970           * <code>required uint64 startTxId = 1;</code>
1971           */
1972          public boolean hasStartTxId() {
1973            return ((bitField0_ & 0x00000001) == 0x00000001);
1974          }
1975          /**
1976           * <code>required uint64 startTxId = 1;</code>
1977           */
1978          public long getStartTxId() {
1979            return startTxId_;
1980          }
1981          /**
1982           * <code>required uint64 startTxId = 1;</code>
1983           */
1984          public Builder setStartTxId(long value) {
1985            bitField0_ |= 0x00000001;
1986            startTxId_ = value;
1987            onChanged();
1988            return this;
1989          }
1990          /**
1991           * <code>required uint64 startTxId = 1;</code>
1992           */
1993          public Builder clearStartTxId() {
1994            bitField0_ = (bitField0_ & ~0x00000001);
1995            startTxId_ = 0L;
1996            onChanged();
1997            return this;
1998          }
1999    
2000          // required uint64 endTxId = 2;
2001          private long endTxId_ ;
2002          /**
2003           * <code>required uint64 endTxId = 2;</code>
2004           */
2005          public boolean hasEndTxId() {
2006            return ((bitField0_ & 0x00000002) == 0x00000002);
2007          }
2008          /**
2009           * <code>required uint64 endTxId = 2;</code>
2010           */
2011          public long getEndTxId() {
2012            return endTxId_;
2013          }
2014          /**
2015           * <code>required uint64 endTxId = 2;</code>
2016           */
2017          public Builder setEndTxId(long value) {
2018            bitField0_ |= 0x00000002;
2019            endTxId_ = value;
2020            onChanged();
2021            return this;
2022          }
2023          /**
2024           * <code>required uint64 endTxId = 2;</code>
2025           */
2026          public Builder clearEndTxId() {
2027            bitField0_ = (bitField0_ & ~0x00000002);
2028            endTxId_ = 0L;
2029            onChanged();
2030            return this;
2031          }
2032    
2033          // required bool isInProgress = 3;
2034          private boolean isInProgress_ ;
2035          /**
2036           * <code>required bool isInProgress = 3;</code>
2037           */
2038          public boolean hasIsInProgress() {
2039            return ((bitField0_ & 0x00000004) == 0x00000004);
2040          }
2041          /**
2042           * <code>required bool isInProgress = 3;</code>
2043           */
2044          public boolean getIsInProgress() {
2045            return isInProgress_;
2046          }
2047          /**
2048           * <code>required bool isInProgress = 3;</code>
2049           */
2050          public Builder setIsInProgress(boolean value) {
2051            bitField0_ |= 0x00000004;
2052            isInProgress_ = value;
2053            onChanged();
2054            return this;
2055          }
2056          /**
2057           * <code>required bool isInProgress = 3;</code>
2058           */
2059          public Builder clearIsInProgress() {
2060            bitField0_ = (bitField0_ & ~0x00000004);
2061            isInProgress_ = false;
2062            onChanged();
2063            return this;
2064          }
2065    
2066          // @@protoc_insertion_point(builder_scope:hadoop.hdfs.SegmentStateProto)
2067        }
2068    
2069        static {
2070          defaultInstance = new SegmentStateProto(true);
2071          defaultInstance.initFields();
2072        }
2073    
2074        // @@protoc_insertion_point(class_scope:hadoop.hdfs.SegmentStateProto)
2075      }
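  // A serialization round-trip sketch for SegmentStateProto (illustrative;
  // the transaction ids are placeholder values):
  //
  //   QJournalProtocolProtos.SegmentStateProto seg =
  //       QJournalProtocolProtos.SegmentStateProto.newBuilder()
  //           .setStartTxId(1L)
  //           .setEndTxId(100L)
  //           .setIsInProgress(false)
  //           .build();
  //   byte[] bytes = seg.toByteArray();
  //   QJournalProtocolProtos.SegmentStateProto parsed =
  //       QJournalProtocolProtos.SegmentStateProto.parseFrom(bytes);
  //   assert parsed.equals(seg);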
2076    
2077      public interface PersistedRecoveryPaxosDataOrBuilder
2078          extends com.google.protobuf.MessageOrBuilder {
2079    
2080        // required .hadoop.hdfs.SegmentStateProto segmentState = 1;
2081        /**
2082         * <code>required .hadoop.hdfs.SegmentStateProto segmentState = 1;</code>
2083         */
2084        boolean hasSegmentState();
2085        /**
2086         * <code>required .hadoop.hdfs.SegmentStateProto segmentState = 1;</code>
2087         */
2088        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto getSegmentState();
2089        /**
2090         * <code>required .hadoop.hdfs.SegmentStateProto segmentState = 1;</code>
2091         */
2092        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder getSegmentStateOrBuilder();
2093    
2094        // required uint64 acceptedInEpoch = 2;
2095        /**
2096         * <code>required uint64 acceptedInEpoch = 2;</code>
2097         */
2098        boolean hasAcceptedInEpoch();
2099        /**
2100         * <code>required uint64 acceptedInEpoch = 2;</code>
2101         */
2102        long getAcceptedInEpoch();
2103      }
2104      /**
2105       * Protobuf type {@code hadoop.hdfs.PersistedRecoveryPaxosData}
2106       *
2107       * <pre>
2109       * The storage format used on local disk for previously
2110       * accepted decisions.
2111       * </pre>
2112       */
2113      public static final class PersistedRecoveryPaxosData extends
2114          com.google.protobuf.GeneratedMessage
2115          implements PersistedRecoveryPaxosDataOrBuilder {
2116        // Use PersistedRecoveryPaxosData.newBuilder() to construct.
2117        private PersistedRecoveryPaxosData(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
2118          super(builder);
2119          this.unknownFields = builder.getUnknownFields();
2120        }
2121        private PersistedRecoveryPaxosData(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
2122    
2123        private static final PersistedRecoveryPaxosData defaultInstance;
2124        public static PersistedRecoveryPaxosData getDefaultInstance() {
2125          return defaultInstance;
2126        }
2127    
2128        public PersistedRecoveryPaxosData getDefaultInstanceForType() {
2129          return defaultInstance;
2130        }
2131    
2132        private final com.google.protobuf.UnknownFieldSet unknownFields;
2133        @java.lang.Override
2134        public final com.google.protobuf.UnknownFieldSet
2135            getUnknownFields() {
2136          return this.unknownFields;
2137        }
2138        private PersistedRecoveryPaxosData(
2139            com.google.protobuf.CodedInputStream input,
2140            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
2141            throws com.google.protobuf.InvalidProtocolBufferException {
2142          initFields();
2143          int mutable_bitField0_ = 0;
2144          com.google.protobuf.UnknownFieldSet.Builder unknownFields =
2145              com.google.protobuf.UnknownFieldSet.newBuilder();
2146          try {
2147            boolean done = false;
2148            while (!done) {
2149              int tag = input.readTag();
2150              switch (tag) {
2151                case 0:
2152                  done = true;
2153                  break;
2154                default: {
2155                  if (!parseUnknownField(input, unknownFields,
2156                                         extensionRegistry, tag)) {
2157                    done = true;
2158                  }
2159                  break;
2160                }
2161                case 10: {
2162                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder subBuilder = null;
2163                  if (((bitField0_ & 0x00000001) == 0x00000001)) {
2164                    subBuilder = segmentState_.toBuilder();
2165                  }
2166                  segmentState_ = input.readMessage(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.PARSER, extensionRegistry);
2167                  if (subBuilder != null) {
2168                    subBuilder.mergeFrom(segmentState_);
2169                    segmentState_ = subBuilder.buildPartial();
2170                  }
2171                  bitField0_ |= 0x00000001;
2172                  break;
2173                }
2174                case 16: {
2175                  bitField0_ |= 0x00000002;
2176                  acceptedInEpoch_ = input.readUInt64();
2177                  break;
2178                }
2179              }
2180            }
2181          } catch (com.google.protobuf.InvalidProtocolBufferException e) {
2182            throw e.setUnfinishedMessage(this);
2183          } catch (java.io.IOException e) {
2184            throw new com.google.protobuf.InvalidProtocolBufferException(
2185                e.getMessage()).setUnfinishedMessage(this);
2186          } finally {
2187            this.unknownFields = unknownFields.build();
2188            makeExtensionsImmutable();
2189          }
2190        }
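    // Note on the parsing constructor above: if segmentState appears more
    // than once on the wire, the later occurrence is merged into the
    // earlier one via a temporary sub-builder instead of replacing it,
    // which is the standard protobuf semantics for a singular message field.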
2191        public static final com.google.protobuf.Descriptors.Descriptor
2192            getDescriptor() {
2193          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_PersistedRecoveryPaxosData_descriptor;
2194        }
2195    
2196        protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
2197            internalGetFieldAccessorTable() {
2198          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_PersistedRecoveryPaxosData_fieldAccessorTable
2199              .ensureFieldAccessorsInitialized(
2200                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData.Builder.class);
2201        }
2202    
2203        public static com.google.protobuf.Parser<PersistedRecoveryPaxosData> PARSER =
2204            new com.google.protobuf.AbstractParser<PersistedRecoveryPaxosData>() {
2205          public PersistedRecoveryPaxosData parsePartialFrom(
2206              com.google.protobuf.CodedInputStream input,
2207              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
2208              throws com.google.protobuf.InvalidProtocolBufferException {
2209            return new PersistedRecoveryPaxosData(input, extensionRegistry);
2210          }
2211        };
2212    
2213        @java.lang.Override
2214        public com.google.protobuf.Parser<PersistedRecoveryPaxosData> getParserForType() {
2215          return PARSER;
2216        }
2217    
2218        private int bitField0_;
2219        // required .hadoop.hdfs.SegmentStateProto segmentState = 1;
2220        public static final int SEGMENTSTATE_FIELD_NUMBER = 1;
2221        private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto segmentState_;
2222        /**
2223         * <code>required .hadoop.hdfs.SegmentStateProto segmentState = 1;</code>
2224         */
2225        public boolean hasSegmentState() {
2226          return ((bitField0_ & 0x00000001) == 0x00000001);
2227        }
2228        /**
2229         * <code>required .hadoop.hdfs.SegmentStateProto segmentState = 1;</code>
2230         */
2231        public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto getSegmentState() {
2232          return segmentState_;
2233        }
2234        /**
2235         * <code>required .hadoop.hdfs.SegmentStateProto segmentState = 1;</code>
2236         */
2237        public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder getSegmentStateOrBuilder() {
2238          return segmentState_;
2239        }
2240    
2241        // required uint64 acceptedInEpoch = 2;
2242        public static final int ACCEPTEDINEPOCH_FIELD_NUMBER = 2;
2243        private long acceptedInEpoch_;
2244        /**
2245         * <code>required uint64 acceptedInEpoch = 2;</code>
2246         */
2247        public boolean hasAcceptedInEpoch() {
2248          return ((bitField0_ & 0x00000002) == 0x00000002);
2249        }
2250        /**
2251         * <code>required uint64 acceptedInEpoch = 2;</code>
2252         */
2253        public long getAcceptedInEpoch() {
2254          return acceptedInEpoch_;
2255        }
2256    
2257        private void initFields() {
2258          segmentState_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance();
2259          acceptedInEpoch_ = 0L;
2260        }
2261        private byte memoizedIsInitialized = -1;
2262        public final boolean isInitialized() {
2263          byte isInitialized = memoizedIsInitialized;
2264          if (isInitialized != -1) return isInitialized == 1;
2265    
2266          if (!hasSegmentState()) {
2267            memoizedIsInitialized = 0;
2268            return false;
2269          }
2270          if (!hasAcceptedInEpoch()) {
2271            memoizedIsInitialized = 0;
2272            return false;
2273          }
2274          if (!getSegmentState().isInitialized()) {
2275            memoizedIsInitialized = 0;
2276            return false;
2277          }
2278          memoizedIsInitialized = 1;
2279          return true;
2280        }
2281    
2282        public void writeTo(com.google.protobuf.CodedOutputStream output)
2283                            throws java.io.IOException {
2284          getSerializedSize();
2285          if (((bitField0_ & 0x00000001) == 0x00000001)) {
2286            output.writeMessage(1, segmentState_);
2287          }
2288          if (((bitField0_ & 0x00000002) == 0x00000002)) {
2289            output.writeUInt64(2, acceptedInEpoch_);
2290          }
2291          getUnknownFields().writeTo(output);
2292        }
2293    
2294        private int memoizedSerializedSize = -1;
2295        public int getSerializedSize() {
2296          int size = memoizedSerializedSize;
2297          if (size != -1) return size;
2298    
2299          size = 0;
2300          if (((bitField0_ & 0x00000001) == 0x00000001)) {
2301            size += com.google.protobuf.CodedOutputStream
2302              .computeMessageSize(1, segmentState_);
2303          }
2304          if (((bitField0_ & 0x00000002) == 0x00000002)) {
2305            size += com.google.protobuf.CodedOutputStream
2306              .computeUInt64Size(2, acceptedInEpoch_);
2307          }
2308          size += getUnknownFields().getSerializedSize();
2309          memoizedSerializedSize = size;
2310          return size;
2311        }
2312    
2313        private static final long serialVersionUID = 0L;
2314        @java.lang.Override
2315        protected java.lang.Object writeReplace()
2316            throws java.io.ObjectStreamException {
2317          return super.writeReplace();
2318        }
2319    
2320        @java.lang.Override
2321        public boolean equals(final java.lang.Object obj) {
2322          if (obj == this) {
        return true;
2324          }
2325          if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData)) {
2326            return super.equals(obj);
2327          }
2328          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData) obj;
2329    
2330          boolean result = true;
2331          result = result && (hasSegmentState() == other.hasSegmentState());
2332          if (hasSegmentState()) {
2333            result = result && getSegmentState()
2334                .equals(other.getSegmentState());
2335          }
2336          result = result && (hasAcceptedInEpoch() == other.hasAcceptedInEpoch());
2337          if (hasAcceptedInEpoch()) {
2338            result = result && (getAcceptedInEpoch()
2339                == other.getAcceptedInEpoch());
2340          }
2341          result = result &&
2342              getUnknownFields().equals(other.getUnknownFields());
2343          return result;
2344        }
2345    
2346        private int memoizedHashCode = 0;
2347        @java.lang.Override
2348        public int hashCode() {
2349          if (memoizedHashCode != 0) {
2350            return memoizedHashCode;
2351          }
2352          int hash = 41;
2353          hash = (19 * hash) + getDescriptorForType().hashCode();
2354          if (hasSegmentState()) {
2355            hash = (37 * hash) + SEGMENTSTATE_FIELD_NUMBER;
2356            hash = (53 * hash) + getSegmentState().hashCode();
2357          }
2358          if (hasAcceptedInEpoch()) {
2359            hash = (37 * hash) + ACCEPTEDINEPOCH_FIELD_NUMBER;
2360            hash = (53 * hash) + hashLong(getAcceptedInEpoch());
2361          }
2362          hash = (29 * hash) + getUnknownFields().hashCode();
2363          memoizedHashCode = hash;
2364          return hash;
2365        }
2366    
2367        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData parseFrom(
2368            com.google.protobuf.ByteString data)
2369            throws com.google.protobuf.InvalidProtocolBufferException {
2370          return PARSER.parseFrom(data);
2371        }
2372        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData parseFrom(
2373            com.google.protobuf.ByteString data,
2374            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
2375            throws com.google.protobuf.InvalidProtocolBufferException {
2376          return PARSER.parseFrom(data, extensionRegistry);
2377        }
2378        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData parseFrom(byte[] data)
2379            throws com.google.protobuf.InvalidProtocolBufferException {
2380          return PARSER.parseFrom(data);
2381        }
2382        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData parseFrom(
2383            byte[] data,
2384            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
2385            throws com.google.protobuf.InvalidProtocolBufferException {
2386          return PARSER.parseFrom(data, extensionRegistry);
2387        }
2388        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData parseFrom(java.io.InputStream input)
2389            throws java.io.IOException {
2390          return PARSER.parseFrom(input);
2391        }
2392        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData parseFrom(
2393            java.io.InputStream input,
2394            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
2395            throws java.io.IOException {
2396          return PARSER.parseFrom(input, extensionRegistry);
2397        }
2398        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData parseDelimitedFrom(java.io.InputStream input)
2399            throws java.io.IOException {
2400          return PARSER.parseDelimitedFrom(input);
2401        }
2402        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData parseDelimitedFrom(
2403            java.io.InputStream input,
2404            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
2405            throws java.io.IOException {
2406          return PARSER.parseDelimitedFrom(input, extensionRegistry);
2407        }
2408        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData parseFrom(
2409            com.google.protobuf.CodedInputStream input)
2410            throws java.io.IOException {
2411          return PARSER.parseFrom(input);
2412        }
2413        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData parseFrom(
2414            com.google.protobuf.CodedInputStream input,
2415            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
2416            throws java.io.IOException {
2417          return PARSER.parseFrom(input, extensionRegistry);
2418        }
2419    
2420        public static Builder newBuilder() { return Builder.create(); }
2421        public Builder newBuilderForType() { return newBuilder(); }
2422        public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData prototype) {
2423          return newBuilder().mergeFrom(prototype);
2424        }
2425        public Builder toBuilder() { return newBuilder(this); }
2426    
2427        @java.lang.Override
2428        protected Builder newBuilderForType(
2429            com.google.protobuf.GeneratedMessage.BuilderParent parent) {
2430          Builder builder = new Builder(parent);
2431          return builder;
2432        }
2433        /**
2434         * Protobuf type {@code hadoop.hdfs.PersistedRecoveryPaxosData}
2435         *
2436         * <pre>
2438         * The storage format used on local disk for previously
2439         * accepted decisions.
2440         * </pre>
2441         */
2442        public static final class Builder extends
2443            com.google.protobuf.GeneratedMessage.Builder<Builder>
2444           implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosDataOrBuilder {
2445          public static final com.google.protobuf.Descriptors.Descriptor
2446              getDescriptor() {
2447            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_PersistedRecoveryPaxosData_descriptor;
2448          }
2449    
2450          protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
2451              internalGetFieldAccessorTable() {
2452            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_PersistedRecoveryPaxosData_fieldAccessorTable
2453                .ensureFieldAccessorsInitialized(
2454                    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData.Builder.class);
2455          }
2456    
2457          // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData.newBuilder()
2458          private Builder() {
2459            maybeForceBuilderInitialization();
2460          }
2461    
2462          private Builder(
2463              com.google.protobuf.GeneratedMessage.BuilderParent parent) {
2464            super(parent);
2465            maybeForceBuilderInitialization();
2466          }
2467          private void maybeForceBuilderInitialization() {
2468            if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
2469              getSegmentStateFieldBuilder();
2470            }
2471          }
2472          private static Builder create() {
2473            return new Builder();
2474          }
2475    
2476          public Builder clear() {
2477            super.clear();
2478            if (segmentStateBuilder_ == null) {
2479              segmentState_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance();
2480            } else {
2481              segmentStateBuilder_.clear();
2482            }
2483            bitField0_ = (bitField0_ & ~0x00000001);
2484            acceptedInEpoch_ = 0L;
2485            bitField0_ = (bitField0_ & ~0x00000002);
2486            return this;
2487          }
2488    
2489          public Builder clone() {
2490            return create().mergeFrom(buildPartial());
2491          }
2492    
2493          public com.google.protobuf.Descriptors.Descriptor
2494              getDescriptorForType() {
2495            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_PersistedRecoveryPaxosData_descriptor;
2496          }
2497    
2498          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData getDefaultInstanceForType() {
2499            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData.getDefaultInstance();
2500          }
2501    
2502          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData build() {
2503            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData result = buildPartial();
2504            if (!result.isInitialized()) {
2505              throw newUninitializedMessageException(result);
2506            }
2507            return result;
2508          }
2509    
2510          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData buildPartial() {
2511            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData(this);
2512            int from_bitField0_ = bitField0_;
2513            int to_bitField0_ = 0;
2514            if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
2515              to_bitField0_ |= 0x00000001;
2516            }
2517            if (segmentStateBuilder_ == null) {
2518              result.segmentState_ = segmentState_;
2519            } else {
2520              result.segmentState_ = segmentStateBuilder_.build();
2521            }
2522            if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
2523              to_bitField0_ |= 0x00000002;
2524            }
2525            result.acceptedInEpoch_ = acceptedInEpoch_;
2526            result.bitField0_ = to_bitField0_;
2527            onBuilt();
2528            return result;
2529          }
2530    
2531          public Builder mergeFrom(com.google.protobuf.Message other) {
2532            if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData) {
2533              return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData)other);
2534            } else {
2535              super.mergeFrom(other);
2536              return this;
2537            }
2538          }
2539    
2540          public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData other) {
2541            if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData.getDefaultInstance()) return this;
2542            if (other.hasSegmentState()) {
2543              mergeSegmentState(other.getSegmentState());
2544            }
2545            if (other.hasAcceptedInEpoch()) {
2546              setAcceptedInEpoch(other.getAcceptedInEpoch());
2547            }
2548            this.mergeUnknownFields(other.getUnknownFields());
2549            return this;
2550          }
2551    
2552          public final boolean isInitialized() {
2553            if (!hasSegmentState()) {
2555              return false;
2556            }
2557            if (!hasAcceptedInEpoch()) {
2559              return false;
2560            }
2561            if (!getSegmentState().isInitialized()) {
2563              return false;
2564            }
2565            return true;
2566          }
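              // Note: build() above throws an UninitializedMessageException when a
              // required field is missing, while buildPartial() skips the check.
              // A sketch of the difference, with a hypothetical value:
              //
              //   Builder b = PersistedRecoveryPaxosData.newBuilder()
              //       .setAcceptedInEpoch(3L);   // segmentState still unset
              //   b.buildPartial();              // returns the partial message
              //   b.build();                     // throws: segmentState missing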
2567    
2568          public Builder mergeFrom(
2569              com.google.protobuf.CodedInputStream input,
2570              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
2571              throws java.io.IOException {
2572            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData parsedMessage = null;
2573            try {
2574              parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
2575            } catch (com.google.protobuf.InvalidProtocolBufferException e) {
2576              parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData) e.getUnfinishedMessage();
2577              throw e;
2578            } finally {
2579              if (parsedMessage != null) {
2580                mergeFrom(parsedMessage);
2581              }
2582            }
2583            return this;
2584          }
2585          private int bitField0_;
2586    
2587          // required .hadoop.hdfs.SegmentStateProto segmentState = 1;
2588          private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto segmentState_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance();
2589          private com.google.protobuf.SingleFieldBuilder<
2590              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder> segmentStateBuilder_;
2591          /**
2592           * <code>required .hadoop.hdfs.SegmentStateProto segmentState = 1;</code>
2593           */
2594          public boolean hasSegmentState() {
2595            return ((bitField0_ & 0x00000001) == 0x00000001);
2596          }
2597          /**
2598           * <code>required .hadoop.hdfs.SegmentStateProto segmentState = 1;</code>
2599           */
2600          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto getSegmentState() {
2601            if (segmentStateBuilder_ == null) {
2602              return segmentState_;
2603            } else {
2604              return segmentStateBuilder_.getMessage();
2605            }
2606          }
2607          /**
2608           * <code>required .hadoop.hdfs.SegmentStateProto segmentState = 1;</code>
2609           */
2610          public Builder setSegmentState(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto value) {
2611            if (segmentStateBuilder_ == null) {
2612              if (value == null) {
2613                throw new NullPointerException();
2614              }
2615              segmentState_ = value;
2616              onChanged();
2617            } else {
2618              segmentStateBuilder_.setMessage(value);
2619            }
2620            bitField0_ |= 0x00000001;
2621            return this;
2622          }
2623          /**
2624           * <code>required .hadoop.hdfs.SegmentStateProto segmentState = 1;</code>
2625           */
2626          public Builder setSegmentState(
2627              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder builderForValue) {
2628            if (segmentStateBuilder_ == null) {
2629              segmentState_ = builderForValue.build();
2630              onChanged();
2631            } else {
2632              segmentStateBuilder_.setMessage(builderForValue.build());
2633            }
2634            bitField0_ |= 0x00000001;
2635            return this;
2636          }
2637          /**
2638           * <code>required .hadoop.hdfs.SegmentStateProto segmentState = 1;</code>
2639           */
2640          public Builder mergeSegmentState(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto value) {
2641            if (segmentStateBuilder_ == null) {
2642              if (((bitField0_ & 0x00000001) == 0x00000001) &&
2643                  segmentState_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance()) {
2644                segmentState_ =
2645                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.newBuilder(segmentState_).mergeFrom(value).buildPartial();
2646              } else {
2647                segmentState_ = value;
2648              }
2649              onChanged();
2650            } else {
2651              segmentStateBuilder_.mergeFrom(value);
2652            }
2653            bitField0_ |= 0x00000001;
2654            return this;
2655          }
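              // Merge semantics (sketch): if segmentState is already set, the value
              // passed to mergeSegmentState() is combined with it field by field
              // rather than replacing it. Field names below are assumptions taken
              // from SegmentStateProto as declared elsewhere in this file:
              //
              //   builder.setSegmentState(SegmentStateProto.newBuilder()
              //       .setStartTxId(1L).setEndTxId(10L).setIsInProgress(true));
              //   builder.mergeSegmentState(SegmentStateProto.newBuilder()
              //       .setIsInProgress(false).buildPartial());
              //   // startTxId/endTxId stay 1L/10L; isInProgress becomes false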
2656          /**
2657           * <code>required .hadoop.hdfs.SegmentStateProto segmentState = 1;</code>
2658           */
2659          public Builder clearSegmentState() {
2660            if (segmentStateBuilder_ == null) {
2661              segmentState_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance();
2662              onChanged();
2663            } else {
2664              segmentStateBuilder_.clear();
2665            }
2666            bitField0_ = (bitField0_ & ~0x00000001);
2667            return this;
2668          }
2669          /**
2670           * <code>required .hadoop.hdfs.SegmentStateProto segmentState = 1;</code>
2671           */
2672          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder getSegmentStateBuilder() {
2673            bitField0_ |= 0x00000001;
2674            onChanged();
2675            return getSegmentStateFieldBuilder().getBuilder();
2676          }
2677          /**
2678           * <code>required .hadoop.hdfs.SegmentStateProto segmentState = 1;</code>
2679           */
2680          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder getSegmentStateOrBuilder() {
2681            if (segmentStateBuilder_ != null) {
2682              return segmentStateBuilder_.getMessageOrBuilder();
2683            } else {
2684              return segmentState_;
2685            }
2686          }
2687          /**
2688           * <code>required .hadoop.hdfs.SegmentStateProto segmentState = 1;</code>
2689           */
2690          private com.google.protobuf.SingleFieldBuilder<
2691              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder> 
2692              getSegmentStateFieldBuilder() {
2693            if (segmentStateBuilder_ == null) {
2694              segmentStateBuilder_ = new com.google.protobuf.SingleFieldBuilder<
2695                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder>(
2696                      segmentState_,
2697                      getParentForChildren(),
2698                      isClean());
2699              segmentState_ = null;
2700            }
2701            return segmentStateBuilder_;
2702          }
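              // The SingleFieldBuilder is created lazily: the field lives in
              // segmentState_ as a plain message until the first call to
              // getSegmentStateBuilder() (or alwaysUseFieldBuilders forces it).
              // In-place mutation through the nested builder then looks like
              // (setIsInProgress is assumed from SegmentStateProto):
              //
              //   builder.getSegmentStateBuilder().setIsInProgress(false);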
2703    
2704          // required uint64 acceptedInEpoch = 2;
2705          private long acceptedInEpoch_ ;
2706          /**
2707           * <code>required uint64 acceptedInEpoch = 2;</code>
2708           */
2709          public boolean hasAcceptedInEpoch() {
2710            return ((bitField0_ & 0x00000002) == 0x00000002);
2711          }
2712          /**
2713           * <code>required uint64 acceptedInEpoch = 2;</code>
2714           */
2715          public long getAcceptedInEpoch() {
2716            return acceptedInEpoch_;
2717          }
2718          /**
2719           * <code>required uint64 acceptedInEpoch = 2;</code>
2720           */
2721          public Builder setAcceptedInEpoch(long value) {
2722            bitField0_ |= 0x00000002;
2723            acceptedInEpoch_ = value;
2724            onChanged();
2725            return this;
2726          }
2727          /**
2728           * <code>required uint64 acceptedInEpoch = 2;</code>
2729           */
2730          public Builder clearAcceptedInEpoch() {
2731            bitField0_ = (bitField0_ & ~0x00000002);
2732            acceptedInEpoch_ = 0L;
2733            onChanged();
2734            return this;
2735          }
2736    
2737          // @@protoc_insertion_point(builder_scope:hadoop.hdfs.PersistedRecoveryPaxosData)
2738        }
2739    
2740        static {
2741          defaultInstance = new PersistedRecoveryPaxosData(true);
2742          defaultInstance.initFields();
2743        }
2744    
2745        // @@protoc_insertion_point(class_scope:hadoop.hdfs.PersistedRecoveryPaxosData)
2746      }
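          // Usage sketch (illustrative, not part of the generated API; the
          // SegmentStateProto setters and values are assumptions based on its
          // declaration elsewhere in this file):
          //
          //   PersistedRecoveryPaxosData data = PersistedRecoveryPaxosData.newBuilder()
          //       .setSegmentState(SegmentStateProto.newBuilder()
          //           .setStartTxId(1L)
          //           .setEndTxId(100L)
          //           .setIsInProgress(false))
          //       .setAcceptedInEpoch(3L)
          //       .build();
          //   com.google.protobuf.ByteString bytes = data.toByteString();
          //   PersistedRecoveryPaxosData copy =
          //       PersistedRecoveryPaxosData.parseFrom(bytes);
          //   assert copy.equals(data);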
2747    
2748      public interface JournalRequestProtoOrBuilder
2749          extends com.google.protobuf.MessageOrBuilder {
2750    
2751        // required .hadoop.hdfs.RequestInfoProto reqInfo = 1;
2752        /**
2753         * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
2754         */
2755        boolean hasReqInfo();
2756        /**
2757         * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
2758         */
2759        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo();
2760        /**
2761         * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
2762         */
2763        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder();
2764    
2765        // required uint64 firstTxnId = 2;
2766        /**
2767         * <code>required uint64 firstTxnId = 2;</code>
2768         */
2769        boolean hasFirstTxnId();
2770        /**
2771         * <code>required uint64 firstTxnId = 2;</code>
2772         */
2773        long getFirstTxnId();
2774    
2775        // required uint32 numTxns = 3;
2776        /**
2777         * <code>required uint32 numTxns = 3;</code>
2778         */
2779        boolean hasNumTxns();
2780        /**
2781         * <code>required uint32 numTxns = 3;</code>
2782         */
2783        int getNumTxns();
2784    
2785        // required bytes records = 4;
2786        /**
2787         * <code>required bytes records = 4;</code>
2788         */
2789        boolean hasRecords();
2790        /**
2791         * <code>required bytes records = 4;</code>
2792         */
2793        com.google.protobuf.ByteString getRecords();
2794    
2795        // required uint64 segmentTxnId = 5;
2796        /**
2797         * <code>required uint64 segmentTxnId = 5;</code>
2798         */
2799        boolean hasSegmentTxnId();
2800        /**
2801         * <code>required uint64 segmentTxnId = 5;</code>
2802         */
2803        long getSegmentTxnId();
2804      }
2805      /**
2806       * Protobuf type {@code hadoop.hdfs.JournalRequestProto}
2807       */
2808      public static final class JournalRequestProto extends
2809          com.google.protobuf.GeneratedMessage
2810          implements JournalRequestProtoOrBuilder {
2811        // Use JournalRequestProto.newBuilder() to construct.
2812        private JournalRequestProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
2813          super(builder);
2814          this.unknownFields = builder.getUnknownFields();
2815        }
2816        private JournalRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
2817    
2818        private static final JournalRequestProto defaultInstance;
2819        public static JournalRequestProto getDefaultInstance() {
2820          return defaultInstance;
2821        }
2822    
2823        public JournalRequestProto getDefaultInstanceForType() {
2824          return defaultInstance;
2825        }
2826    
2827        private final com.google.protobuf.UnknownFieldSet unknownFields;
2828        @java.lang.Override
2829        public final com.google.protobuf.UnknownFieldSet
2830            getUnknownFields() {
2831          return this.unknownFields;
2832        }
2833        private JournalRequestProto(
2834            com.google.protobuf.CodedInputStream input,
2835            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
2836            throws com.google.protobuf.InvalidProtocolBufferException {
2837          initFields();
2838          int mutable_bitField0_ = 0;
2839          com.google.protobuf.UnknownFieldSet.Builder unknownFields =
2840              com.google.protobuf.UnknownFieldSet.newBuilder();
2841          try {
2842            boolean done = false;
2843            while (!done) {
2844              int tag = input.readTag();
2845              switch (tag) {
2846                case 0:
2847                  done = true;
2848                  break;
2849                default: {
2850                  if (!parseUnknownField(input, unknownFields,
2851                                         extensionRegistry, tag)) {
2852                    done = true;
2853                  }
2854                  break;
2855                }
2856                case 10: {
2857                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder subBuilder = null;
2858                  if (((bitField0_ & 0x00000001) == 0x00000001)) {
2859                    subBuilder = reqInfo_.toBuilder();
2860                  }
2861                  reqInfo_ = input.readMessage(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.PARSER, extensionRegistry);
2862                  if (subBuilder != null) {
2863                    subBuilder.mergeFrom(reqInfo_);
2864                    reqInfo_ = subBuilder.buildPartial();
2865                  }
2866                  bitField0_ |= 0x00000001;
2867                  break;
2868                }
2869                case 16: {
2870                  bitField0_ |= 0x00000002;
2871                  firstTxnId_ = input.readUInt64();
2872                  break;
2873                }
2874                case 24: {
2875                  bitField0_ |= 0x00000004;
2876                  numTxns_ = input.readUInt32();
2877                  break;
2878                }
2879                case 34: {
2880                  bitField0_ |= 0x00000008;
2881                  records_ = input.readBytes();
2882                  break;
2883                }
2884                case 40: {
2885                  bitField0_ |= 0x00000010;
2886                  segmentTxnId_ = input.readUInt64();
2887                  break;
2888                }
2889              }
2890            }
2891          } catch (com.google.protobuf.InvalidProtocolBufferException e) {
2892            throw e.setUnfinishedMessage(this);
2893          } catch (java.io.IOException e) {
2894            throw new com.google.protobuf.InvalidProtocolBufferException(
2895                e.getMessage()).setUnfinishedMessage(this);
2896          } finally {
2897            this.unknownFields = unknownFields.build();
2898            makeExtensionsImmutable();
2899          }
2900        }
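            // The case labels above are protobuf wire tags: tag = (field number << 3)
            // | wire type. So 10 = field 1, length-delimited (reqInfo); 16 = field 2,
            // varint (firstTxnId); 24 = field 3, varint (numTxns); 34 = field 4,
            // length-delimited (records); 40 = field 5, varint (segmentTxnId).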
2901        public static final com.google.protobuf.Descriptors.Descriptor
2902            getDescriptor() {
2903          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_JournalRequestProto_descriptor;
2904        }
2905    
2906        protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
2907            internalGetFieldAccessorTable() {
2908          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_JournalRequestProto_fieldAccessorTable
2909              .ensureFieldAccessorsInitialized(
2910                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto.Builder.class);
2911        }
2912    
2913        public static com.google.protobuf.Parser<JournalRequestProto> PARSER =
2914            new com.google.protobuf.AbstractParser<JournalRequestProto>() {
2915          public JournalRequestProto parsePartialFrom(
2916              com.google.protobuf.CodedInputStream input,
2917              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
2918              throws com.google.protobuf.InvalidProtocolBufferException {
2919            return new JournalRequestProto(input, extensionRegistry);
2920          }
2921        };
2922    
2923        @java.lang.Override
2924        public com.google.protobuf.Parser<JournalRequestProto> getParserForType() {
2925          return PARSER;
2926        }
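            // PARSER backs the static parseFrom()/parseDelimitedFrom() overloads
            // below and can also be used directly, e.g. (sketch; rawBytes is a
            // hypothetical byte[]):
            //
            //   JournalRequestProto req = JournalRequestProto.PARSER.parseFrom(rawBytes);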
2927    
2928        private int bitField0_;
2929        // required .hadoop.hdfs.RequestInfoProto reqInfo = 1;
2930        public static final int REQINFO_FIELD_NUMBER = 1;
2931        private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto reqInfo_;
2932        /**
2933         * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
2934         */
2935        public boolean hasReqInfo() {
2936          return ((bitField0_ & 0x00000001) == 0x00000001);
2937        }
2938        /**
2939         * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
2940         */
2941        public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo() {
2942          return reqInfo_;
2943        }
2944        /**
2945         * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
2946         */
2947        public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder() {
2948          return reqInfo_;
2949        }
2950    
2951        // required uint64 firstTxnId = 2;
2952        public static final int FIRSTTXNID_FIELD_NUMBER = 2;
2953        private long firstTxnId_;
2954        /**
2955         * <code>required uint64 firstTxnId = 2;</code>
2956         */
2957        public boolean hasFirstTxnId() {
2958          return ((bitField0_ & 0x00000002) == 0x00000002);
2959        }
2960        /**
2961         * <code>required uint64 firstTxnId = 2;</code>
2962         */
2963        public long getFirstTxnId() {
2964          return firstTxnId_;
2965        }
2966    
2967        // required uint32 numTxns = 3;
2968        public static final int NUMTXNS_FIELD_NUMBER = 3;
2969        private int numTxns_;
2970        /**
2971         * <code>required uint32 numTxns = 3;</code>
2972         */
2973        public boolean hasNumTxns() {
2974          return ((bitField0_ & 0x00000004) == 0x00000004);
2975        }
2976        /**
2977         * <code>required uint32 numTxns = 3;</code>
2978         */
2979        public int getNumTxns() {
2980          return numTxns_;
2981        }
2982    
2983        // required bytes records = 4;
2984        public static final int RECORDS_FIELD_NUMBER = 4;
2985        private com.google.protobuf.ByteString records_;
2986        /**
2987         * <code>required bytes records = 4;</code>
2988         */
2989        public boolean hasRecords() {
2990          return ((bitField0_ & 0x00000008) == 0x00000008);
2991        }
2992        /**
2993         * <code>required bytes records = 4;</code>
2994         */
2995        public com.google.protobuf.ByteString getRecords() {
2996          return records_;
2997        }
2998    
2999        // required uint64 segmentTxnId = 5;
3000        public static final int SEGMENTTXNID_FIELD_NUMBER = 5;
3001        private long segmentTxnId_;
3002        /**
3003         * <code>required uint64 segmentTxnId = 5;</code>
3004         */
3005        public boolean hasSegmentTxnId() {
3006          return ((bitField0_ & 0x00000010) == 0x00000010);
3007        }
3008        /**
3009         * <code>required uint64 segmentTxnId = 5;</code>
3010         */
3011        public long getSegmentTxnId() {
3012          return segmentTxnId_;
3013        }
3014    
3015        private void initFields() {
3016          reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
3017          firstTxnId_ = 0L;
3018          numTxns_ = 0;
3019          records_ = com.google.protobuf.ByteString.EMPTY;
3020          segmentTxnId_ = 0L;
3021        }
3022        private byte memoizedIsInitialized = -1;
3023        public final boolean isInitialized() {
3024          byte isInitialized = memoizedIsInitialized;
3025          if (isInitialized != -1) return isInitialized == 1;
3026    
3027          if (!hasReqInfo()) {
3028            memoizedIsInitialized = 0;
3029            return false;
3030          }
3031          if (!hasFirstTxnId()) {
3032            memoizedIsInitialized = 0;
3033            return false;
3034          }
3035          if (!hasNumTxns()) {
3036            memoizedIsInitialized = 0;
3037            return false;
3038          }
3039          if (!hasRecords()) {
3040            memoizedIsInitialized = 0;
3041            return false;
3042          }
3043          if (!hasSegmentTxnId()) {
3044            memoizedIsInitialized = 0;
3045            return false;
3046          }
3047          if (!getReqInfo().isInitialized()) {
3048            memoizedIsInitialized = 0;
3049            return false;
3050          }
3051          memoizedIsInitialized = 1;
3052          return true;
3053        }
3054    
3055        public void writeTo(com.google.protobuf.CodedOutputStream output)
3056                            throws java.io.IOException {
3057          getSerializedSize();
3058          if (((bitField0_ & 0x00000001) == 0x00000001)) {
3059            output.writeMessage(1, reqInfo_);
3060          }
3061          if (((bitField0_ & 0x00000002) == 0x00000002)) {
3062            output.writeUInt64(2, firstTxnId_);
3063          }
3064          if (((bitField0_ & 0x00000004) == 0x00000004)) {
3065            output.writeUInt32(3, numTxns_);
3066          }
3067          if (((bitField0_ & 0x00000008) == 0x00000008)) {
3068            output.writeBytes(4, records_);
3069          }
3070          if (((bitField0_ & 0x00000010) == 0x00000010)) {
3071            output.writeUInt64(5, segmentTxnId_);
3072          }
3073          getUnknownFields().writeTo(output);
3074        }
3075    
3076        private int memoizedSerializedSize = -1;
3077        public int getSerializedSize() {
3078          int size = memoizedSerializedSize;
3079          if (size != -1) return size;
3080    
3081          size = 0;
3082          if (((bitField0_ & 0x00000001) == 0x00000001)) {
3083            size += com.google.protobuf.CodedOutputStream
3084              .computeMessageSize(1, reqInfo_);
3085          }
3086          if (((bitField0_ & 0x00000002) == 0x00000002)) {
3087            size += com.google.protobuf.CodedOutputStream
3088              .computeUInt64Size(2, firstTxnId_);
3089          }
3090          if (((bitField0_ & 0x00000004) == 0x00000004)) {
3091            size += com.google.protobuf.CodedOutputStream
3092              .computeUInt32Size(3, numTxns_);
3093          }
3094          if (((bitField0_ & 0x00000008) == 0x00000008)) {
3095            size += com.google.protobuf.CodedOutputStream
3096              .computeBytesSize(4, records_);
3097          }
3098          if (((bitField0_ & 0x00000010) == 0x00000010)) {
3099            size += com.google.protobuf.CodedOutputStream
3100              .computeUInt64Size(5, segmentTxnId_);
3101          }
3102          size += getUnknownFields().getSerializedSize();
3103          memoizedSerializedSize = size;
3104          return size;
3105        }
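            // writeTo() calls getSerializedSize() first so that the sizes of nested
            // messages are memoized before anything is written; caching both
            // memoizedSerializedSize and memoizedIsInitialized is safe because a
            // built message is immutable.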
3106    
3107        private static final long serialVersionUID = 0L;
3108        @java.lang.Override
3109        protected java.lang.Object writeReplace()
3110            throws java.io.ObjectStreamException {
3111          return super.writeReplace();
3112        }
3113    
3114        @java.lang.Override
3115        public boolean equals(final java.lang.Object obj) {
3116          if (obj == this) {
3117            return true;
3118          }
3119          if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto)) {
3120            return super.equals(obj);
3121          }
3122          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto) obj;
3123    
3124          boolean result = true;
3125          result = result && (hasReqInfo() == other.hasReqInfo());
3126          if (hasReqInfo()) {
3127            result = result && getReqInfo()
3128                .equals(other.getReqInfo());
3129          }
3130          result = result && (hasFirstTxnId() == other.hasFirstTxnId());
3131          if (hasFirstTxnId()) {
3132            result = result && (getFirstTxnId()
3133                == other.getFirstTxnId());
3134          }
3135          result = result && (hasNumTxns() == other.hasNumTxns());
3136          if (hasNumTxns()) {
3137            result = result && (getNumTxns()
3138                == other.getNumTxns());
3139          }
3140          result = result && (hasRecords() == other.hasRecords());
3141          if (hasRecords()) {
3142            result = result && getRecords()
3143                .equals(other.getRecords());
3144          }
3145          result = result && (hasSegmentTxnId() == other.hasSegmentTxnId());
3146          if (hasSegmentTxnId()) {
3147            result = result && (getSegmentTxnId()
3148                == other.getSegmentTxnId());
3149          }
3150          result = result &&
3151              getUnknownFields().equals(other.getUnknownFields());
3152          return result;
3153        }
3154    
3155        private int memoizedHashCode = 0;
3156        @java.lang.Override
3157        public int hashCode() {
3158          if (memoizedHashCode != 0) {
3159            return memoizedHashCode;
3160          }
3161          int hash = 41;
3162          hash = (19 * hash) + getDescriptorForType().hashCode();
3163          if (hasReqInfo()) {
3164            hash = (37 * hash) + REQINFO_FIELD_NUMBER;
3165            hash = (53 * hash) + getReqInfo().hashCode();
3166          }
3167          if (hasFirstTxnId()) {
3168            hash = (37 * hash) + FIRSTTXNID_FIELD_NUMBER;
3169            hash = (53 * hash) + hashLong(getFirstTxnId());
3170          }
3171          if (hasNumTxns()) {
3172            hash = (37 * hash) + NUMTXNS_FIELD_NUMBER;
3173            hash = (53 * hash) + getNumTxns();
3174          }
3175          if (hasRecords()) {
3176            hash = (37 * hash) + RECORDS_FIELD_NUMBER;
3177            hash = (53 * hash) + getRecords().hashCode();
3178          }
3179          if (hasSegmentTxnId()) {
3180            hash = (37 * hash) + SEGMENTTXNID_FIELD_NUMBER;
3181            hash = (53 * hash) + hashLong(getSegmentTxnId());
3182          }
3183          hash = (29 * hash) + getUnknownFields().hashCode();
3184          memoizedHashCode = hash;
3185          return hash;
3186        }
3187    
3188        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto parseFrom(
3189            com.google.protobuf.ByteString data)
3190            throws com.google.protobuf.InvalidProtocolBufferException {
3191          return PARSER.parseFrom(data);
3192        }
3193        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto parseFrom(
3194            com.google.protobuf.ByteString data,
3195            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
3196            throws com.google.protobuf.InvalidProtocolBufferException {
3197          return PARSER.parseFrom(data, extensionRegistry);
3198        }
3199        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto parseFrom(byte[] data)
3200            throws com.google.protobuf.InvalidProtocolBufferException {
3201          return PARSER.parseFrom(data);
3202        }
3203        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto parseFrom(
3204            byte[] data,
3205            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
3206            throws com.google.protobuf.InvalidProtocolBufferException {
3207          return PARSER.parseFrom(data, extensionRegistry);
3208        }
3209        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto parseFrom(java.io.InputStream input)
3210            throws java.io.IOException {
3211          return PARSER.parseFrom(input);
3212        }
3213        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto parseFrom(
3214            java.io.InputStream input,
3215            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
3216            throws java.io.IOException {
3217          return PARSER.parseFrom(input, extensionRegistry);
3218        }
3219        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto parseDelimitedFrom(java.io.InputStream input)
3220            throws java.io.IOException {
3221          return PARSER.parseDelimitedFrom(input);
3222        }
3223        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto parseDelimitedFrom(
3224            java.io.InputStream input,
3225            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
3226            throws java.io.IOException {
3227          return PARSER.parseDelimitedFrom(input, extensionRegistry);
3228        }
3229        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto parseFrom(
3230            com.google.protobuf.CodedInputStream input)
3231            throws java.io.IOException {
3232          return PARSER.parseFrom(input);
3233        }
3234        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto parseFrom(
3235            com.google.protobuf.CodedInputStream input,
3236            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
3237            throws java.io.IOException {
3238          return PARSER.parseFrom(input, extensionRegistry);
3239        }
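            // Sketch for reading a sequence of length-prefixed messages from one
            // stream (parseDelimitedFrom() returns null at end of stream; in and
            // process() are hypothetical):
            //
            //   JournalRequestProto req;
            //   while ((req = JournalRequestProto.parseDelimitedFrom(in)) != null) {
            //     process(req);
            //   }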
3240    
3241        public static Builder newBuilder() { return Builder.create(); }
3242        public Builder newBuilderForType() { return newBuilder(); }
3243        public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto prototype) {
3244          return newBuilder().mergeFrom(prototype);
3245        }
3246        public Builder toBuilder() { return newBuilder(this); }
3247    
3248        @java.lang.Override
3249        protected Builder newBuilderForType(
3250            com.google.protobuf.GeneratedMessage.BuilderParent parent) {
3251          Builder builder = new Builder(parent);
3252          return builder;
3253        }
3254        /**
3255         * Protobuf type {@code hadoop.hdfs.JournalRequestProto}
3256         */
3257        public static final class Builder extends
3258            com.google.protobuf.GeneratedMessage.Builder<Builder>
3259           implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProtoOrBuilder {
3260          public static final com.google.protobuf.Descriptors.Descriptor
3261              getDescriptor() {
3262            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_JournalRequestProto_descriptor;
3263          }
3264    
3265          protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
3266              internalGetFieldAccessorTable() {
3267            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_JournalRequestProto_fieldAccessorTable
3268                .ensureFieldAccessorsInitialized(
3269                    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto.Builder.class);
3270          }
3271    
3272          // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto.newBuilder()
3273          private Builder() {
3274            maybeForceBuilderInitialization();
3275          }
3276    
3277          private Builder(
3278              com.google.protobuf.GeneratedMessage.BuilderParent parent) {
3279            super(parent);
3280            maybeForceBuilderInitialization();
3281          }
3282          private void maybeForceBuilderInitialization() {
3283            if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
3284              getReqInfoFieldBuilder();
3285            }
3286          }
3287          private static Builder create() {
3288            return new Builder();
3289          }
3290    
3291          public Builder clear() {
3292            super.clear();
3293            if (reqInfoBuilder_ == null) {
3294              reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
3295            } else {
3296              reqInfoBuilder_.clear();
3297            }
3298            bitField0_ = (bitField0_ & ~0x00000001);
3299            firstTxnId_ = 0L;
3300            bitField0_ = (bitField0_ & ~0x00000002);
3301            numTxns_ = 0;
3302            bitField0_ = (bitField0_ & ~0x00000004);
3303            records_ = com.google.protobuf.ByteString.EMPTY;
3304            bitField0_ = (bitField0_ & ~0x00000008);
3305            segmentTxnId_ = 0L;
3306            bitField0_ = (bitField0_ & ~0x00000010);
3307            return this;
3308          }
3309    
3310          public Builder clone() {
3311            return create().mergeFrom(buildPartial());
3312          }
3313    
3314          public com.google.protobuf.Descriptors.Descriptor
3315              getDescriptorForType() {
3316            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_JournalRequestProto_descriptor;
3317          }
3318    
3319          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto getDefaultInstanceForType() {
3320            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto.getDefaultInstance();
3321          }
3322    
3323          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto build() {
3324            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto result = buildPartial();
3325            if (!result.isInitialized()) {
3326              throw newUninitializedMessageException(result);
3327            }
3328            return result;
3329          }
3330    
3331          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto buildPartial() {
3332            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto(this);
3333            int from_bitField0_ = bitField0_;
3334            int to_bitField0_ = 0;
3335            if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
3336              to_bitField0_ |= 0x00000001;
3337            }
3338            if (reqInfoBuilder_ == null) {
3339              result.reqInfo_ = reqInfo_;
3340            } else {
3341              result.reqInfo_ = reqInfoBuilder_.build();
3342            }
3343            if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
3344              to_bitField0_ |= 0x00000002;
3345            }
3346            result.firstTxnId_ = firstTxnId_;
3347            if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
3348              to_bitField0_ |= 0x00000004;
3349            }
3350            result.numTxns_ = numTxns_;
3351            if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
3352              to_bitField0_ |= 0x00000008;
3353            }
3354            result.records_ = records_;
3355            if (((from_bitField0_ & 0x00000010) == 0x00000010)) {
3356              to_bitField0_ |= 0x00000010;
3357            }
3358            result.segmentTxnId_ = segmentTxnId_;
3359            result.bitField0_ = to_bitField0_;
3360            onBuilt();
3361            return result;
3362          }
3363    
3364          public Builder mergeFrom(com.google.protobuf.Message other) {
3365            if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto) {
3366              return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto)other);
3367            } else {
3368              super.mergeFrom(other);
3369              return this;
3370            }
3371          }
3372    
3373          public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto other) {
3374            if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto.getDefaultInstance()) return this;
3375            if (other.hasReqInfo()) {
3376              mergeReqInfo(other.getReqInfo());
3377            }
3378            if (other.hasFirstTxnId()) {
3379              setFirstTxnId(other.getFirstTxnId());
3380            }
3381            if (other.hasNumTxns()) {
3382              setNumTxns(other.getNumTxns());
3383            }
3384            if (other.hasRecords()) {
3385              setRecords(other.getRecords());
3386            }
3387            if (other.hasSegmentTxnId()) {
3388              setSegmentTxnId(other.getSegmentTxnId());
3389            }
3390            this.mergeUnknownFields(other.getUnknownFields());
3391            return this;
3392          }
3393    
3394          public final boolean isInitialized() {
3395            if (!hasReqInfo()) {
3397              return false;
3398            }
3399            if (!hasFirstTxnId()) {
3401              return false;
3402            }
3403            if (!hasNumTxns()) {
3405              return false;
3406            }
3407            if (!hasRecords()) {
3409              return false;
3410            }
3411            if (!hasSegmentTxnId()) {
3413              return false;
3414            }
3415            if (!getReqInfo().isInitialized()) {
3417              return false;
3418            }
3419            return true;
3420          }
3421    
3422          public Builder mergeFrom(
3423              com.google.protobuf.CodedInputStream input,
3424              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
3425              throws java.io.IOException {
3426            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto parsedMessage = null;
3427            try {
3428              parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
3429            } catch (com.google.protobuf.InvalidProtocolBufferException e) {
3430              parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto) e.getUnfinishedMessage();
3431              throw e;
3432            } finally {
3433              if (parsedMessage != null) {
3434                mergeFrom(parsedMessage);
3435              }
3436            }
3437            return this;
3438          }
3439          private int bitField0_;
3440    
3441          // required .hadoop.hdfs.RequestInfoProto reqInfo = 1;
3442          private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
3443          private com.google.protobuf.SingleFieldBuilder<
3444              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder> reqInfoBuilder_;
3445          /**
3446           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
3447           */
3448          public boolean hasReqInfo() {
3449            return ((bitField0_ & 0x00000001) == 0x00000001);
3450          }
3451          /**
3452           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
3453           */
3454          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo() {
3455            if (reqInfoBuilder_ == null) {
3456              return reqInfo_;
3457            } else {
3458              return reqInfoBuilder_.getMessage();
3459            }
3460          }
3461          /**
3462           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
3463           */
3464          public Builder setReqInfo(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto value) {
3465            if (reqInfoBuilder_ == null) {
3466              if (value == null) {
3467                throw new NullPointerException();
3468              }
3469              reqInfo_ = value;
3470              onChanged();
3471            } else {
3472              reqInfoBuilder_.setMessage(value);
3473            }
3474            bitField0_ |= 0x00000001;
3475            return this;
3476          }
3477          /**
3478           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
3479           */
3480          public Builder setReqInfo(
3481              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder builderForValue) {
3482            if (reqInfoBuilder_ == null) {
3483              reqInfo_ = builderForValue.build();
3484              onChanged();
3485            } else {
3486              reqInfoBuilder_.setMessage(builderForValue.build());
3487            }
3488            bitField0_ |= 0x00000001;
3489            return this;
3490          }
3491          /**
3492           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
3493           */
3494          public Builder mergeReqInfo(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto value) {
3495            if (reqInfoBuilder_ == null) {
3496              if (((bitField0_ & 0x00000001) == 0x00000001) &&
3497                  reqInfo_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance()) {
3498                reqInfo_ =
3499                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.newBuilder(reqInfo_).mergeFrom(value).buildPartial();
3500              } else {
3501                reqInfo_ = value;
3502              }
3503              onChanged();
3504            } else {
3505              reqInfoBuilder_.mergeFrom(value);
3506            }
3507            bitField0_ |= 0x00000001;
3508            return this;
3509          }
3510          /**
3511           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
3512           */
3513          public Builder clearReqInfo() {
3514            if (reqInfoBuilder_ == null) {
3515              reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
3516              onChanged();
3517            } else {
3518              reqInfoBuilder_.clear();
3519            }
3520            bitField0_ = (bitField0_ & ~0x00000001);
3521            return this;
3522          }
3523          /**
3524           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
3525           */
3526          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder getReqInfoBuilder() {
3527            bitField0_ |= 0x00000001;
3528            onChanged();
3529            return getReqInfoFieldBuilder().getBuilder();
3530          }
3531          /**
3532           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
3533           */
3534          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder() {
3535            if (reqInfoBuilder_ != null) {
3536              return reqInfoBuilder_.getMessageOrBuilder();
3537            } else {
3538              return reqInfo_;
3539            }
3540          }
3541          /**
3542           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
3543           */
3544          private com.google.protobuf.SingleFieldBuilder<
3545              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder> 
3546              getReqInfoFieldBuilder() {
3547            if (reqInfoBuilder_ == null) {
3548              reqInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder<
3549                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder>(
3550                      reqInfo_,
3551                      getParentForChildren(),
3552                      isClean());
3553              reqInfo_ = null;
3554            }
3555            return reqInfoBuilder_;
3556          }
3557    
3558          // required uint64 firstTxnId = 2;
3559          private long firstTxnId_ ;
3560          /**
3561           * <code>required uint64 firstTxnId = 2;</code>
3562           */
3563          public boolean hasFirstTxnId() {
3564            return ((bitField0_ & 0x00000002) == 0x00000002);
3565          }
3566          /**
3567           * <code>required uint64 firstTxnId = 2;</code>
3568           */
3569          public long getFirstTxnId() {
3570            return firstTxnId_;
3571          }
3572          /**
3573           * <code>required uint64 firstTxnId = 2;</code>
3574           */
3575          public Builder setFirstTxnId(long value) {
3576            bitField0_ |= 0x00000002;
3577            firstTxnId_ = value;
3578            onChanged();
3579            return this;
3580          }
3581          /**
3582           * <code>required uint64 firstTxnId = 2;</code>
3583           */
3584          public Builder clearFirstTxnId() {
3585            bitField0_ = (bitField0_ & ~0x00000002);
3586            firstTxnId_ = 0L;
3587            onChanged();
3588            return this;
3589          }
3590    
3591          // required uint32 numTxns = 3;
3592          private int numTxns_ ;
3593          /**
3594           * <code>required uint32 numTxns = 3;</code>
3595           */
3596          public boolean hasNumTxns() {
3597            return ((bitField0_ & 0x00000004) == 0x00000004);
3598          }
3599          /**
3600           * <code>required uint32 numTxns = 3;</code>
3601           */
3602          public int getNumTxns() {
3603            return numTxns_;
3604          }
3605          /**
3606           * <code>required uint32 numTxns = 3;</code>
3607           */
3608          public Builder setNumTxns(int value) {
3609            bitField0_ |= 0x00000004;
3610            numTxns_ = value;
3611            onChanged();
3612            return this;
3613          }
3614          /**
3615           * <code>required uint32 numTxns = 3;</code>
3616           */
3617          public Builder clearNumTxns() {
3618            bitField0_ = (bitField0_ & ~0x00000004);
3619            numTxns_ = 0;
3620            onChanged();
3621            return this;
3622          }
3623    
3624          // required bytes records = 4;
3625          private com.google.protobuf.ByteString records_ = com.google.protobuf.ByteString.EMPTY;
3626          /**
3627           * <code>required bytes records = 4;</code>
3628           */
3629          public boolean hasRecords() {
3630            return ((bitField0_ & 0x00000008) == 0x00000008);
3631          }
3632          /**
3633           * <code>required bytes records = 4;</code>
3634           */
3635          public com.google.protobuf.ByteString getRecords() {
3636            return records_;
3637          }
3638          /**
3639           * <code>required bytes records = 4;</code>
3640           */
3641          public Builder setRecords(com.google.protobuf.ByteString value) {
3642            if (value == null) {
3643              throw new NullPointerException();
3644            }
3645            bitField0_ |= 0x00000008;
3646            records_ = value;
3647            onChanged();
3648            return this;
3649          }
3650          /**
3651           * <code>required bytes records = 4;</code>
3652           */
3653          public Builder clearRecords() {
3654            bitField0_ = (bitField0_ & ~0x00000008);
3655            records_ = getDefaultInstance().getRecords();
3656            onChanged();
3657            return this;
3658          }
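              // records is an immutable ByteString carrying the raw journal
              // records; a caller would typically wrap an existing byte buffer,
              // e.g. (sketch; buffer and length are hypothetical):
              //
              //   builder.setRecords(
              //       com.google.protobuf.ByteString.copyFrom(buffer, 0, length));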
3659    
3660          // required uint64 segmentTxnId = 5;
3661          private long segmentTxnId_ ;
3662          /**
3663           * <code>required uint64 segmentTxnId = 5;</code>
3664           */
3665          public boolean hasSegmentTxnId() {
3666            return ((bitField0_ & 0x00000010) == 0x00000010);
3667          }
3668          /**
3669           * <code>required uint64 segmentTxnId = 5;</code>
3670           */
3671          public long getSegmentTxnId() {
3672            return segmentTxnId_;
3673          }
3674          /**
3675           * <code>required uint64 segmentTxnId = 5;</code>
3676           */
3677          public Builder setSegmentTxnId(long value) {
3678            bitField0_ |= 0x00000010;
3679            segmentTxnId_ = value;
3680            onChanged();
3681            return this;
3682          }
3683          /**
3684           * <code>required uint64 segmentTxnId = 5;</code>
3685           */
3686          public Builder clearSegmentTxnId() {
3687            bitField0_ = (bitField0_ & ~0x00000010);
3688            segmentTxnId_ = 0L;
3689            onChanged();
3690            return this;
3691          }
3692    
3693          // @@protoc_insertion_point(builder_scope:hadoop.hdfs.JournalRequestProto)
3694        }
3695    
3696        static {
3697          defaultInstance = new JournalRequestProto(true);
3698          defaultInstance.initFields();
3699        }
3700    
3701        // @@protoc_insertion_point(class_scope:hadoop.hdfs.JournalRequestProto)
3702      }
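          // Usage sketch (illustrative): a receiver typically parses the request
          // and reads fields through the has/get pairs generated above (data is a
          // hypothetical byte[]):
          //
          //   JournalRequestProto req = JournalRequestProto.parseFrom(data);
          //   if (req.isInitialized() && req.hasNumTxns()) {
          //     long firstTxnId = req.getFirstTxnId();
          //     int numTxns = req.getNumTxns();
          //     com.google.protobuf.ByteString records = req.getRecords();
          //     // apply numTxns transactions starting at firstTxnId
          //   }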
3703    
3704      public interface JournalResponseProtoOrBuilder
3705          extends com.google.protobuf.MessageOrBuilder {
3706      }
3707      /**
3708       * Protobuf type {@code hadoop.hdfs.JournalResponseProto}
3709       */
3710      public static final class JournalResponseProto extends
3711          com.google.protobuf.GeneratedMessage
3712          implements JournalResponseProtoOrBuilder {
3713        // Use JournalResponseProto.newBuilder() to construct.
3714        private JournalResponseProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
3715          super(builder);
3716          this.unknownFields = builder.getUnknownFields();
3717        }
3718        private JournalResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
3719    
3720        private static final JournalResponseProto defaultInstance;
3721        public static JournalResponseProto getDefaultInstance() {
3722          return defaultInstance;
3723        }
3724    
3725        public JournalResponseProto getDefaultInstanceForType() {
3726          return defaultInstance;
3727        }
3728    
3729        private final com.google.protobuf.UnknownFieldSet unknownFields;
3730        @java.lang.Override
3731        public final com.google.protobuf.UnknownFieldSet
3732            getUnknownFields() {
3733          return this.unknownFields;
3734        }
3735        private JournalResponseProto(
3736            com.google.protobuf.CodedInputStream input,
3737            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
3738            throws com.google.protobuf.InvalidProtocolBufferException {
3739          initFields();
3740          com.google.protobuf.UnknownFieldSet.Builder unknownFields =
3741              com.google.protobuf.UnknownFieldSet.newBuilder();
3742          try {
3743            boolean done = false;
3744            while (!done) {
3745              int tag = input.readTag();
3746              switch (tag) {
3747                case 0:
3748                  done = true;
3749                  break;
3750                default: {
3751                  if (!parseUnknownField(input, unknownFields,
3752                                         extensionRegistry, tag)) {
3753                    done = true;
3754                  }
3755                  break;
3756                }
3757              }
3758            }
3759          } catch (com.google.protobuf.InvalidProtocolBufferException e) {
3760            throw e.setUnfinishedMessage(this);
3761          } catch (java.io.IOException e) {
3762            throw new com.google.protobuf.InvalidProtocolBufferException(
3763                e.getMessage()).setUnfinishedMessage(this);
3764          } finally {
3765            this.unknownFields = unknownFields.build();
3766            makeExtensionsImmutable();
3767          }
3768        }
3769        public static final com.google.protobuf.Descriptors.Descriptor
3770            getDescriptor() {
3771          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_JournalResponseProto_descriptor;
3772        }
3773    
3774        protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
3775            internalGetFieldAccessorTable() {
3776          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_JournalResponseProto_fieldAccessorTable
3777              .ensureFieldAccessorsInitialized(
3778                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto.Builder.class);
3779        }
3780    
3781        public static com.google.protobuf.Parser<JournalResponseProto> PARSER =
3782            new com.google.protobuf.AbstractParser<JournalResponseProto>() {
3783          public JournalResponseProto parsePartialFrom(
3784              com.google.protobuf.CodedInputStream input,
3785              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
3786              throws com.google.protobuf.InvalidProtocolBufferException {
3787            return new JournalResponseProto(input, extensionRegistry);
3788          }
3789        };
3790    
3791        @java.lang.Override
3792        public com.google.protobuf.Parser<JournalResponseProto> getParserForType() {
3793          return PARSER;
3794        }
3795    
3796        private void initFields() {
3797        }
3798        private byte memoizedIsInitialized = -1;
3799        public final boolean isInitialized() {
3800          byte isInitialized = memoizedIsInitialized;
3801          if (isInitialized != -1) return isInitialized == 1;
3802    
3803          memoizedIsInitialized = 1;
3804          return true;
3805        }
3806    
3807        public void writeTo(com.google.protobuf.CodedOutputStream output)
3808                            throws java.io.IOException {
3809          getSerializedSize();
3810          getUnknownFields().writeTo(output);
3811        }
3812    
3813        private int memoizedSerializedSize = -1;
3814        public int getSerializedSize() {
3815          int size = memoizedSerializedSize;
3816          if (size != -1) return size;
3817    
3818          size = 0;
3819          size += getUnknownFields().getSerializedSize();
3820          memoizedSerializedSize = size;
3821          return size;
3822        }
3823    
3824        private static final long serialVersionUID = 0L;
3825        @java.lang.Override
3826        protected java.lang.Object writeReplace()
3827            throws java.io.ObjectStreamException {
3828          return super.writeReplace();
3829        }
3830    
3831        @java.lang.Override
3832        public boolean equals(final java.lang.Object obj) {
3833          if (obj == this) {
        return true;
3835          }
3836          if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto)) {
3837            return super.equals(obj);
3838          }
3839          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto) obj;
3840    
3841          boolean result = true;
3842          result = result &&
3843              getUnknownFields().equals(other.getUnknownFields());
3844          return result;
3845        }
3846    
3847        private int memoizedHashCode = 0;
3848        @java.lang.Override
3849        public int hashCode() {
3850          if (memoizedHashCode != 0) {
3851            return memoizedHashCode;
3852          }
3853          int hash = 41;
3854          hash = (19 * hash) + getDescriptorForType().hashCode();
3855          hash = (29 * hash) + getUnknownFields().hashCode();
3856          memoizedHashCode = hash;
3857          return hash;
3858        }
3859    
3860        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto parseFrom(
3861            com.google.protobuf.ByteString data)
3862            throws com.google.protobuf.InvalidProtocolBufferException {
3863          return PARSER.parseFrom(data);
3864        }
3865        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto parseFrom(
3866            com.google.protobuf.ByteString data,
3867            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
3868            throws com.google.protobuf.InvalidProtocolBufferException {
3869          return PARSER.parseFrom(data, extensionRegistry);
3870        }
3871        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto parseFrom(byte[] data)
3872            throws com.google.protobuf.InvalidProtocolBufferException {
3873          return PARSER.parseFrom(data);
3874        }
3875        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto parseFrom(
3876            byte[] data,
3877            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
3878            throws com.google.protobuf.InvalidProtocolBufferException {
3879          return PARSER.parseFrom(data, extensionRegistry);
3880        }
3881        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto parseFrom(java.io.InputStream input)
3882            throws java.io.IOException {
3883          return PARSER.parseFrom(input);
3884        }
3885        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto parseFrom(
3886            java.io.InputStream input,
3887            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
3888            throws java.io.IOException {
3889          return PARSER.parseFrom(input, extensionRegistry);
3890        }
3891        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto parseDelimitedFrom(java.io.InputStream input)
3892            throws java.io.IOException {
3893          return PARSER.parseDelimitedFrom(input);
3894        }
3895        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto parseDelimitedFrom(
3896            java.io.InputStream input,
3897            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
3898            throws java.io.IOException {
3899          return PARSER.parseDelimitedFrom(input, extensionRegistry);
3900        }
3901        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto parseFrom(
3902            com.google.protobuf.CodedInputStream input)
3903            throws java.io.IOException {
3904          return PARSER.parseFrom(input);
3905        }
3906        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto parseFrom(
3907            com.google.protobuf.CodedInputStream input,
3908            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
3909            throws java.io.IOException {
3910          return PARSER.parseFrom(input, extensionRegistry);
3911        }
3912    
3913        public static Builder newBuilder() { return Builder.create(); }
3914        public Builder newBuilderForType() { return newBuilder(); }
3915        public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto prototype) {
3916          return newBuilder().mergeFrom(prototype);
3917        }
3918        public Builder toBuilder() { return newBuilder(this); }
3919    
3920        @java.lang.Override
3921        protected Builder newBuilderForType(
3922            com.google.protobuf.GeneratedMessage.BuilderParent parent) {
3923          Builder builder = new Builder(parent);
3924          return builder;
3925        }
3926        /**
3927         * Protobuf type {@code hadoop.hdfs.JournalResponseProto}
3928         */
3929        public static final class Builder extends
3930            com.google.protobuf.GeneratedMessage.Builder<Builder>
3931           implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProtoOrBuilder {
3932          public static final com.google.protobuf.Descriptors.Descriptor
3933              getDescriptor() {
3934            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_JournalResponseProto_descriptor;
3935          }
3936    
3937          protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
3938              internalGetFieldAccessorTable() {
3939            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_JournalResponseProto_fieldAccessorTable
3940                .ensureFieldAccessorsInitialized(
3941                    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto.Builder.class);
3942          }
3943    
3944          // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto.newBuilder()
3945          private Builder() {
3946            maybeForceBuilderInitialization();
3947          }
3948    
3949          private Builder(
3950              com.google.protobuf.GeneratedMessage.BuilderParent parent) {
3951            super(parent);
3952            maybeForceBuilderInitialization();
3953          }
3954          private void maybeForceBuilderInitialization() {
3955            if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
3956            }
3957          }
3958          private static Builder create() {
3959            return new Builder();
3960          }
3961    
3962          public Builder clear() {
3963            super.clear();
3964            return this;
3965          }
3966    
3967          public Builder clone() {
3968            return create().mergeFrom(buildPartial());
3969          }
3970    
3971          public com.google.protobuf.Descriptors.Descriptor
3972              getDescriptorForType() {
3973            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_JournalResponseProto_descriptor;
3974          }
3975    
3976          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto getDefaultInstanceForType() {
3977            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto.getDefaultInstance();
3978          }
3979    
3980          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto build() {
3981            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto result = buildPartial();
3982            if (!result.isInitialized()) {
3983              throw newUninitializedMessageException(result);
3984            }
3985            return result;
3986          }
3987    
3988          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto buildPartial() {
3989            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto(this);
3990            onBuilt();
3991            return result;
3992          }
3993    
3994          public Builder mergeFrom(com.google.protobuf.Message other) {
3995            if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto) {
3996              return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto)other);
3997            } else {
3998              super.mergeFrom(other);
3999              return this;
4000            }
4001          }
4002    
4003          public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto other) {
4004            if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto.getDefaultInstance()) return this;
4005            this.mergeUnknownFields(other.getUnknownFields());
4006            return this;
4007          }
4008    
4009          public final boolean isInitialized() {
4010            return true;
4011          }
4012    
4013          public Builder mergeFrom(
4014              com.google.protobuf.CodedInputStream input,
4015              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
4016              throws java.io.IOException {
4017            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto parsedMessage = null;
4018            try {
4019              parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
4020            } catch (com.google.protobuf.InvalidProtocolBufferException e) {
4021              parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto) e.getUnfinishedMessage();
4022              throw e;
4023            } finally {
4024              if (parsedMessage != null) {
4025                mergeFrom(parsedMessage);
4026              }
4027            }
4028            return this;
4029          }
4030    
4031          // @@protoc_insertion_point(builder_scope:hadoop.hdfs.JournalResponseProto)
4032        }
4033    
4034        static {
4035          defaultInstance = new JournalResponseProto(true);
4036          defaultInstance.initFields();
4037        }
4038    
4039        // @@protoc_insertion_point(class_scope:hadoop.hdfs.JournalResponseProto)
4040      }
4041    
4042      public interface HeartbeatRequestProtoOrBuilder
4043          extends com.google.protobuf.MessageOrBuilder {
4044    
4045        // required .hadoop.hdfs.RequestInfoProto reqInfo = 1;
4046        /**
4047         * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
4048         */
4049        boolean hasReqInfo();
4050        /**
4051         * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
4052         */
4053        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo();
4054        /**
4055         * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
4056         */
4057        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder();
4058      }
4059      /**
4060       * Protobuf type {@code hadoop.hdfs.HeartbeatRequestProto}
4061       */
4062      public static final class HeartbeatRequestProto extends
4063          com.google.protobuf.GeneratedMessage
4064          implements HeartbeatRequestProtoOrBuilder {
4065        // Use HeartbeatRequestProto.newBuilder() to construct.
4066        private HeartbeatRequestProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
4067          super(builder);
4068          this.unknownFields = builder.getUnknownFields();
4069        }
4070        private HeartbeatRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
4071    
4072        private static final HeartbeatRequestProto defaultInstance;
4073        public static HeartbeatRequestProto getDefaultInstance() {
4074          return defaultInstance;
4075        }
4076    
4077        public HeartbeatRequestProto getDefaultInstanceForType() {
4078          return defaultInstance;
4079        }
4080    
4081        private final com.google.protobuf.UnknownFieldSet unknownFields;
4082        @java.lang.Override
4083        public final com.google.protobuf.UnknownFieldSet
4084            getUnknownFields() {
4085          return this.unknownFields;
4086        }
4087        private HeartbeatRequestProto(
4088            com.google.protobuf.CodedInputStream input,
4089            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
4090            throws com.google.protobuf.InvalidProtocolBufferException {
4091          initFields();
4092          int mutable_bitField0_ = 0;
4093          com.google.protobuf.UnknownFieldSet.Builder unknownFields =
4094              com.google.protobuf.UnknownFieldSet.newBuilder();
4095          try {
4096            boolean done = false;
4097            while (!done) {
4098              int tag = input.readTag();
4099              switch (tag) {
4100                case 0:
4101                  done = true;
4102                  break;
4103                default: {
4104                  if (!parseUnknownField(input, unknownFields,
4105                                         extensionRegistry, tag)) {
4106                    done = true;
4107                  }
4108                  break;
4109                }
4110                case 10: {
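              // tag 10 = (field number 1 << 3) | wire type 2
              // (length-delimited): the reqInfo submessage. A repeated
              // occurrence is merged into the earlier value via subBuilder.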
4111                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder subBuilder = null;
4112                  if (((bitField0_ & 0x00000001) == 0x00000001)) {
4113                    subBuilder = reqInfo_.toBuilder();
4114                  }
4115                  reqInfo_ = input.readMessage(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.PARSER, extensionRegistry);
4116                  if (subBuilder != null) {
4117                    subBuilder.mergeFrom(reqInfo_);
4118                    reqInfo_ = subBuilder.buildPartial();
4119                  }
4120                  bitField0_ |= 0x00000001;
4121                  break;
4122                }
4123              }
4124            }
4125          } catch (com.google.protobuf.InvalidProtocolBufferException e) {
4126            throw e.setUnfinishedMessage(this);
4127          } catch (java.io.IOException e) {
4128            throw new com.google.protobuf.InvalidProtocolBufferException(
4129                e.getMessage()).setUnfinishedMessage(this);
4130          } finally {
4131            this.unknownFields = unknownFields.build();
4132            makeExtensionsImmutable();
4133          }
4134        }
4135        public static final com.google.protobuf.Descriptors.Descriptor
4136            getDescriptor() {
4137          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_HeartbeatRequestProto_descriptor;
4138        }
4139    
4140        protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
4141            internalGetFieldAccessorTable() {
4142          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_HeartbeatRequestProto_fieldAccessorTable
4143              .ensureFieldAccessorsInitialized(
4144                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto.Builder.class);
4145        }
4146    
4147        public static com.google.protobuf.Parser<HeartbeatRequestProto> PARSER =
4148            new com.google.protobuf.AbstractParser<HeartbeatRequestProto>() {
4149          public HeartbeatRequestProto parsePartialFrom(
4150              com.google.protobuf.CodedInputStream input,
4151              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
4152              throws com.google.protobuf.InvalidProtocolBufferException {
4153            return new HeartbeatRequestProto(input, extensionRegistry);
4154          }
4155        };
4156    
4157        @java.lang.Override
4158        public com.google.protobuf.Parser<HeartbeatRequestProto> getParserForType() {
4159          return PARSER;
4160        }
4161    
4162        private int bitField0_;
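    // bitField0_ holds proto2 presence bits; bit 0x00000001 tracks reqInfo.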
4163        // required .hadoop.hdfs.RequestInfoProto reqInfo = 1;
4164        public static final int REQINFO_FIELD_NUMBER = 1;
4165        private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto reqInfo_;
4166        /**
4167         * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
4168         */
4169        public boolean hasReqInfo() {
4170          return ((bitField0_ & 0x00000001) == 0x00000001);
4171        }
4172        /**
4173         * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
4174         */
4175        public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo() {
4176          return reqInfo_;
4177        }
4178        /**
4179         * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
4180         */
4181        public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder() {
4182          return reqInfo_;
4183        }
4184    
4185        private void initFields() {
4186          reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
4187        }
4188        private byte memoizedIsInitialized = -1;
4189        public final boolean isInitialized() {
4190          byte isInitialized = memoizedIsInitialized;
4191          if (isInitialized != -1) return isInitialized == 1;
4192    
4193          if (!hasReqInfo()) {
4194            memoizedIsInitialized = 0;
4195            return false;
4196          }
4197          if (!getReqInfo().isInitialized()) {
4198            memoizedIsInitialized = 0;
4199            return false;
4200          }
4201          memoizedIsInitialized = 1;
4202          return true;
4203        }
4204    
4205        public void writeTo(com.google.protobuf.CodedOutputStream output)
4206                            throws java.io.IOException {
4207          getSerializedSize();
4208          if (((bitField0_ & 0x00000001) == 0x00000001)) {
4209            output.writeMessage(1, reqInfo_);
4210          }
4211          getUnknownFields().writeTo(output);
4212        }
4213    
4214        private int memoizedSerializedSize = -1;
4215        public int getSerializedSize() {
4216          int size = memoizedSerializedSize;
4217          if (size != -1) return size;
4218    
4219          size = 0;
4220          if (((bitField0_ & 0x00000001) == 0x00000001)) {
4221            size += com.google.protobuf.CodedOutputStream
4222              .computeMessageSize(1, reqInfo_);
4223          }
4224          size += getUnknownFields().getSerializedSize();
4225          memoizedSerializedSize = size;
4226          return size;
4227        }
4228    
4229        private static final long serialVersionUID = 0L;
4230        @java.lang.Override
4231        protected java.lang.Object writeReplace()
4232            throws java.io.ObjectStreamException {
4233          return super.writeReplace();
4234        }
4235    
4236        @java.lang.Override
4237        public boolean equals(final java.lang.Object obj) {
4238          if (obj == this) {
        return true;
4240          }
4241          if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto)) {
4242            return super.equals(obj);
4243          }
4244          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto) obj;
4245    
4246          boolean result = true;
4247          result = result && (hasReqInfo() == other.hasReqInfo());
4248          if (hasReqInfo()) {
4249            result = result && getReqInfo()
4250                .equals(other.getReqInfo());
4251          }
4252          result = result &&
4253              getUnknownFields().equals(other.getUnknownFields());
4254          return result;
4255        }
4256    
4257        private int memoizedHashCode = 0;
4258        @java.lang.Override
4259        public int hashCode() {
4260          if (memoizedHashCode != 0) {
4261            return memoizedHashCode;
4262          }
4263          int hash = 41;
4264          hash = (19 * hash) + getDescriptorForType().hashCode();
4265          if (hasReqInfo()) {
4266            hash = (37 * hash) + REQINFO_FIELD_NUMBER;
4267            hash = (53 * hash) + getReqInfo().hashCode();
4268          }
4269          hash = (29 * hash) + getUnknownFields().hashCode();
4270          memoizedHashCode = hash;
4271          return hash;
4272        }
4273    
4274        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto parseFrom(
4275            com.google.protobuf.ByteString data)
4276            throws com.google.protobuf.InvalidProtocolBufferException {
4277          return PARSER.parseFrom(data);
4278        }
4279        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto parseFrom(
4280            com.google.protobuf.ByteString data,
4281            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
4282            throws com.google.protobuf.InvalidProtocolBufferException {
4283          return PARSER.parseFrom(data, extensionRegistry);
4284        }
4285        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto parseFrom(byte[] data)
4286            throws com.google.protobuf.InvalidProtocolBufferException {
4287          return PARSER.parseFrom(data);
4288        }
4289        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto parseFrom(
4290            byte[] data,
4291            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
4292            throws com.google.protobuf.InvalidProtocolBufferException {
4293          return PARSER.parseFrom(data, extensionRegistry);
4294        }
4295        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto parseFrom(java.io.InputStream input)
4296            throws java.io.IOException {
4297          return PARSER.parseFrom(input);
4298        }
4299        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto parseFrom(
4300            java.io.InputStream input,
4301            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
4302            throws java.io.IOException {
4303          return PARSER.parseFrom(input, extensionRegistry);
4304        }
4305        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto parseDelimitedFrom(java.io.InputStream input)
4306            throws java.io.IOException {
4307          return PARSER.parseDelimitedFrom(input);
4308        }
4309        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto parseDelimitedFrom(
4310            java.io.InputStream input,
4311            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
4312            throws java.io.IOException {
4313          return PARSER.parseDelimitedFrom(input, extensionRegistry);
4314        }
4315        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto parseFrom(
4316            com.google.protobuf.CodedInputStream input)
4317            throws java.io.IOException {
4318          return PARSER.parseFrom(input);
4319        }
4320        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto parseFrom(
4321            com.google.protobuf.CodedInputStream input,
4322            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
4323            throws java.io.IOException {
4324          return PARSER.parseFrom(input, extensionRegistry);
4325        }
4326    
4327        public static Builder newBuilder() { return Builder.create(); }
4328        public Builder newBuilderForType() { return newBuilder(); }
4329        public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto prototype) {
4330          return newBuilder().mergeFrom(prototype);
4331        }
4332        public Builder toBuilder() { return newBuilder(this); }
4333    
4334        @java.lang.Override
4335        protected Builder newBuilderForType(
4336            com.google.protobuf.GeneratedMessage.BuilderParent parent) {
4337          Builder builder = new Builder(parent);
4338          return builder;
4339        }
4340        /**
4341         * Protobuf type {@code hadoop.hdfs.HeartbeatRequestProto}
4342         */
4343        public static final class Builder extends
4344            com.google.protobuf.GeneratedMessage.Builder<Builder>
4345           implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProtoOrBuilder {
4346          public static final com.google.protobuf.Descriptors.Descriptor
4347              getDescriptor() {
4348            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_HeartbeatRequestProto_descriptor;
4349          }
4350    
4351          protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
4352              internalGetFieldAccessorTable() {
4353            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_HeartbeatRequestProto_fieldAccessorTable
4354                .ensureFieldAccessorsInitialized(
4355                    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto.Builder.class);
4356          }
4357    
4358          // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto.newBuilder()
4359          private Builder() {
4360            maybeForceBuilderInitialization();
4361          }
4362    
4363          private Builder(
4364              com.google.protobuf.GeneratedMessage.BuilderParent parent) {
4365            super(parent);
4366            maybeForceBuilderInitialization();
4367          }
4368          private void maybeForceBuilderInitialization() {
4369            if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
4370              getReqInfoFieldBuilder();
4371            }
4372          }
4373          private static Builder create() {
4374            return new Builder();
4375          }
4376    
4377          public Builder clear() {
4378            super.clear();
4379            if (reqInfoBuilder_ == null) {
4380              reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
4381            } else {
4382              reqInfoBuilder_.clear();
4383            }
4384            bitField0_ = (bitField0_ & ~0x00000001);
4385            return this;
4386          }
4387    
4388          public Builder clone() {
4389            return create().mergeFrom(buildPartial());
4390          }
4391    
4392          public com.google.protobuf.Descriptors.Descriptor
4393              getDescriptorForType() {
4394            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_HeartbeatRequestProto_descriptor;
4395          }
4396    
4397          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto getDefaultInstanceForType() {
4398            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto.getDefaultInstance();
4399          }
4400    
4401          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto build() {
4402            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto result = buildPartial();
4403            if (!result.isInitialized()) {
4404              throw newUninitializedMessageException(result);
4405            }
4406            return result;
4407          }
4408    
4409          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto buildPartial() {
4410            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto(this);
4411            int from_bitField0_ = bitField0_;
4412            int to_bitField0_ = 0;
4413            if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
4414              to_bitField0_ |= 0x00000001;
4415            }
4416            if (reqInfoBuilder_ == null) {
4417              result.reqInfo_ = reqInfo_;
4418            } else {
4419              result.reqInfo_ = reqInfoBuilder_.build();
4420            }
4421            result.bitField0_ = to_bitField0_;
4422            onBuilt();
4423            return result;
4424          }
4425    
4426          public Builder mergeFrom(com.google.protobuf.Message other) {
4427            if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto) {
4428              return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto)other);
4429            } else {
4430              super.mergeFrom(other);
4431              return this;
4432            }
4433          }
4434    
4435          public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto other) {
4436            if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto.getDefaultInstance()) return this;
4437            if (other.hasReqInfo()) {
4438              mergeReqInfo(other.getReqInfo());
4439            }
4440            this.mergeUnknownFields(other.getUnknownFields());
4441            return this;
4442          }
4443    
      public final boolean isInitialized() {
        if (!hasReqInfo()) {
          return false;
        }
        if (!getReqInfo().isInitialized()) {
          return false;
        }
        return true;
      }
4455    
4456          public Builder mergeFrom(
4457              com.google.protobuf.CodedInputStream input,
4458              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
4459              throws java.io.IOException {
4460            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto parsedMessage = null;
4461            try {
4462              parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
4463            } catch (com.google.protobuf.InvalidProtocolBufferException e) {
4464              parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto) e.getUnfinishedMessage();
4465              throw e;
4466            } finally {
4467              if (parsedMessage != null) {
4468                mergeFrom(parsedMessage);
4469              }
4470            }
4471            return this;
4472          }
4473          private int bitField0_;
4474    
4475          // required .hadoop.hdfs.RequestInfoProto reqInfo = 1;
4476          private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
4477          private com.google.protobuf.SingleFieldBuilder<
4478              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder> reqInfoBuilder_;
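      // reqInfoBuilder_ stays null until getReqInfoFieldBuilder() first
      // runs; once created it owns the field state and reqInfo_ is nulled.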
4479          /**
4480           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
4481           */
4482          public boolean hasReqInfo() {
4483            return ((bitField0_ & 0x00000001) == 0x00000001);
4484          }
4485          /**
4486           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
4487           */
4488          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo() {
4489            if (reqInfoBuilder_ == null) {
4490              return reqInfo_;
4491            } else {
4492              return reqInfoBuilder_.getMessage();
4493            }
4494          }
4495          /**
4496           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
4497           */
4498          public Builder setReqInfo(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto value) {
4499            if (reqInfoBuilder_ == null) {
4500              if (value == null) {
4501                throw new NullPointerException();
4502              }
4503              reqInfo_ = value;
4504              onChanged();
4505            } else {
4506              reqInfoBuilder_.setMessage(value);
4507            }
4508            bitField0_ |= 0x00000001;
4509            return this;
4510          }
4511          /**
4512           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
4513           */
4514          public Builder setReqInfo(
4515              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder builderForValue) {
4516            if (reqInfoBuilder_ == null) {
4517              reqInfo_ = builderForValue.build();
4518              onChanged();
4519            } else {
4520              reqInfoBuilder_.setMessage(builderForValue.build());
4521            }
4522            bitField0_ |= 0x00000001;
4523            return this;
4524          }
4525          /**
4526           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
4527           */
4528          public Builder mergeReqInfo(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto value) {
4529            if (reqInfoBuilder_ == null) {
4530              if (((bitField0_ & 0x00000001) == 0x00000001) &&
4531                  reqInfo_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance()) {
4532                reqInfo_ =
4533                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.newBuilder(reqInfo_).mergeFrom(value).buildPartial();
4534              } else {
4535                reqInfo_ = value;
4536              }
4537              onChanged();
4538            } else {
4539              reqInfoBuilder_.mergeFrom(value);
4540            }
4541            bitField0_ |= 0x00000001;
4542            return this;
4543          }
4544          /**
4545           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
4546           */
4547          public Builder clearReqInfo() {
4548            if (reqInfoBuilder_ == null) {
4549              reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
4550              onChanged();
4551            } else {
4552              reqInfoBuilder_.clear();
4553            }
4554            bitField0_ = (bitField0_ & ~0x00000001);
4555            return this;
4556          }
4557          /**
4558           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
4559           */
4560          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder getReqInfoBuilder() {
4561            bitField0_ |= 0x00000001;
4562            onChanged();
4563            return getReqInfoFieldBuilder().getBuilder();
4564          }
4565          /**
4566           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
4567           */
4568          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder() {
4569            if (reqInfoBuilder_ != null) {
4570              return reqInfoBuilder_.getMessageOrBuilder();
4571            } else {
4572              return reqInfo_;
4573            }
4574          }
4575          /**
4576           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
4577           */
4578          private com.google.protobuf.SingleFieldBuilder<
4579              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder> 
4580              getReqInfoFieldBuilder() {
4581            if (reqInfoBuilder_ == null) {
4582              reqInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder<
4583                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder>(
4584                      reqInfo_,
4585                      getParentForChildren(),
4586                      isClean());
4587              reqInfo_ = null;
4588            }
4589            return reqInfoBuilder_;
4590          }
4591    
4592          // @@protoc_insertion_point(builder_scope:hadoop.hdfs.HeartbeatRequestProto)
4593        }
4594    
4595        static {
4596          defaultInstance = new HeartbeatRequestProto(true);
4597          defaultInstance.initFields();
4598        }
4599    
4600        // @@protoc_insertion_point(class_scope:hadoop.hdfs.HeartbeatRequestProto)
4601      }
4602    
4603      public interface HeartbeatResponseProtoOrBuilder
4604          extends com.google.protobuf.MessageOrBuilder {
4605      }
4606      /**
4607       * Protobuf type {@code hadoop.hdfs.HeartbeatResponseProto}
4608       *
4609       * <pre>
4610       * void response
4611       * </pre>
4612       */
4613      public static final class HeartbeatResponseProto extends
4614          com.google.protobuf.GeneratedMessage
4615          implements HeartbeatResponseProtoOrBuilder {
4616        // Use HeartbeatResponseProto.newBuilder() to construct.
4617        private HeartbeatResponseProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
4618          super(builder);
4619          this.unknownFields = builder.getUnknownFields();
4620        }
4621        private HeartbeatResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
4622    
4623        private static final HeartbeatResponseProto defaultInstance;
4624        public static HeartbeatResponseProto getDefaultInstance() {
4625          return defaultInstance;
4626        }
4627    
4628        public HeartbeatResponseProto getDefaultInstanceForType() {
4629          return defaultInstance;
4630        }
4631    
4632        private final com.google.protobuf.UnknownFieldSet unknownFields;
4633        @java.lang.Override
4634        public final com.google.protobuf.UnknownFieldSet
4635            getUnknownFields() {
4636          return this.unknownFields;
4637        }
4638        private HeartbeatResponseProto(
4639            com.google.protobuf.CodedInputStream input,
4640            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
4641            throws com.google.protobuf.InvalidProtocolBufferException {
4642          initFields();
4643          com.google.protobuf.UnknownFieldSet.Builder unknownFields =
4644              com.google.protobuf.UnknownFieldSet.newBuilder();
4645          try {
4646            boolean done = false;
4647            while (!done) {
4648              int tag = input.readTag();
4649              switch (tag) {
4650                case 0:
4651                  done = true;
4652                  break;
4653                default: {
4654                  if (!parseUnknownField(input, unknownFields,
4655                                         extensionRegistry, tag)) {
4656                    done = true;
4657                  }
4658                  break;
4659                }
4660              }
4661            }
4662          } catch (com.google.protobuf.InvalidProtocolBufferException e) {
4663            throw e.setUnfinishedMessage(this);
4664          } catch (java.io.IOException e) {
4665            throw new com.google.protobuf.InvalidProtocolBufferException(
4666                e.getMessage()).setUnfinishedMessage(this);
4667          } finally {
4668            this.unknownFields = unknownFields.build();
4669            makeExtensionsImmutable();
4670          }
4671        }
4672        public static final com.google.protobuf.Descriptors.Descriptor
4673            getDescriptor() {
4674          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_HeartbeatResponseProto_descriptor;
4675        }
4676    
4677        protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
4678            internalGetFieldAccessorTable() {
4679          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_HeartbeatResponseProto_fieldAccessorTable
4680              .ensureFieldAccessorsInitialized(
4681                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto.Builder.class);
4682        }
4683    
4684        public static com.google.protobuf.Parser<HeartbeatResponseProto> PARSER =
4685            new com.google.protobuf.AbstractParser<HeartbeatResponseProto>() {
4686          public HeartbeatResponseProto parsePartialFrom(
4687              com.google.protobuf.CodedInputStream input,
4688              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
4689              throws com.google.protobuf.InvalidProtocolBufferException {
4690            return new HeartbeatResponseProto(input, extensionRegistry);
4691          }
4692        };
4693    
4694        @java.lang.Override
4695        public com.google.protobuf.Parser<HeartbeatResponseProto> getParserForType() {
4696          return PARSER;
4697        }
4698    
4699        private void initFields() {
4700        }
4701        private byte memoizedIsInitialized = -1;
4702        public final boolean isInitialized() {
4703          byte isInitialized = memoizedIsInitialized;
4704          if (isInitialized != -1) return isInitialized == 1;
4705    
4706          memoizedIsInitialized = 1;
4707          return true;
4708        }
4709    
4710        public void writeTo(com.google.protobuf.CodedOutputStream output)
4711                            throws java.io.IOException {
4712          getSerializedSize();
4713          getUnknownFields().writeTo(output);
4714        }
4715    
4716        private int memoizedSerializedSize = -1;
4717        public int getSerializedSize() {
4718          int size = memoizedSerializedSize;
4719          if (size != -1) return size;
4720    
4721          size = 0;
4722          size += getUnknownFields().getSerializedSize();
4723          memoizedSerializedSize = size;
4724          return size;
4725        }
4726    
4727        private static final long serialVersionUID = 0L;
4728        @java.lang.Override
4729        protected java.lang.Object writeReplace()
4730            throws java.io.ObjectStreamException {
4731          return super.writeReplace();
4732        }
4733    
4734        @java.lang.Override
4735        public boolean equals(final java.lang.Object obj) {
4736          if (obj == this) {
        return true;
4738          }
4739          if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto)) {
4740            return super.equals(obj);
4741          }
4742          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto) obj;
4743    
4744          boolean result = true;
4745          result = result &&
4746              getUnknownFields().equals(other.getUnknownFields());
4747          return result;
4748        }
4749    
4750        private int memoizedHashCode = 0;
4751        @java.lang.Override
4752        public int hashCode() {
4753          if (memoizedHashCode != 0) {
4754            return memoizedHashCode;
4755          }
4756          int hash = 41;
4757          hash = (19 * hash) + getDescriptorForType().hashCode();
4758          hash = (29 * hash) + getUnknownFields().hashCode();
4759          memoizedHashCode = hash;
4760          return hash;
4761        }
4762    
4763        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto parseFrom(
4764            com.google.protobuf.ByteString data)
4765            throws com.google.protobuf.InvalidProtocolBufferException {
4766          return PARSER.parseFrom(data);
4767        }
4768        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto parseFrom(
4769            com.google.protobuf.ByteString data,
4770            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
4771            throws com.google.protobuf.InvalidProtocolBufferException {
4772          return PARSER.parseFrom(data, extensionRegistry);
4773        }
4774        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto parseFrom(byte[] data)
4775            throws com.google.protobuf.InvalidProtocolBufferException {
4776          return PARSER.parseFrom(data);
4777        }
4778        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto parseFrom(
4779            byte[] data,
4780            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
4781            throws com.google.protobuf.InvalidProtocolBufferException {
4782          return PARSER.parseFrom(data, extensionRegistry);
4783        }
4784        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto parseFrom(java.io.InputStream input)
4785            throws java.io.IOException {
4786          return PARSER.parseFrom(input);
4787        }
4788        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto parseFrom(
4789            java.io.InputStream input,
4790            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
4791            throws java.io.IOException {
4792          return PARSER.parseFrom(input, extensionRegistry);
4793        }
4794        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto parseDelimitedFrom(java.io.InputStream input)
4795            throws java.io.IOException {
4796          return PARSER.parseDelimitedFrom(input);
4797        }
4798        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto parseDelimitedFrom(
4799            java.io.InputStream input,
4800            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
4801            throws java.io.IOException {
4802          return PARSER.parseDelimitedFrom(input, extensionRegistry);
4803        }
4804        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto parseFrom(
4805            com.google.protobuf.CodedInputStream input)
4806            throws java.io.IOException {
4807          return PARSER.parseFrom(input);
4808        }
4809        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto parseFrom(
4810            com.google.protobuf.CodedInputStream input,
4811            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
4812            throws java.io.IOException {
4813          return PARSER.parseFrom(input, extensionRegistry);
4814        }
4815    
4816        public static Builder newBuilder() { return Builder.create(); }
4817        public Builder newBuilderForType() { return newBuilder(); }
4818        public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto prototype) {
4819          return newBuilder().mergeFrom(prototype);
4820        }
4821        public Builder toBuilder() { return newBuilder(this); }
4822    
4823        @java.lang.Override
4824        protected Builder newBuilderForType(
4825            com.google.protobuf.GeneratedMessage.BuilderParent parent) {
4826          Builder builder = new Builder(parent);
4827          return builder;
4828        }
4829        /**
4830         * Protobuf type {@code hadoop.hdfs.HeartbeatResponseProto}
4831         *
4832         * <pre>
4833         * void response
4834         * </pre>
4835         */
4836        public static final class Builder extends
4837            com.google.protobuf.GeneratedMessage.Builder<Builder>
4838           implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProtoOrBuilder {
4839          public static final com.google.protobuf.Descriptors.Descriptor
4840              getDescriptor() {
4841            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_HeartbeatResponseProto_descriptor;
4842          }
4843    
4844          protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
4845              internalGetFieldAccessorTable() {
4846            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_HeartbeatResponseProto_fieldAccessorTable
4847                .ensureFieldAccessorsInitialized(
4848                    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto.Builder.class);
4849          }
4850    
4851          // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto.newBuilder()
4852          private Builder() {
4853            maybeForceBuilderInitialization();
4854          }
4855    
4856          private Builder(
4857              com.google.protobuf.GeneratedMessage.BuilderParent parent) {
4858            super(parent);
4859            maybeForceBuilderInitialization();
4860          }
4861          private void maybeForceBuilderInitialization() {
4862            if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
4863            }
4864          }
4865          private static Builder create() {
4866            return new Builder();
4867          }
4868    
4869          public Builder clear() {
4870            super.clear();
4871            return this;
4872          }
4873    
4874          public Builder clone() {
4875            return create().mergeFrom(buildPartial());
4876          }
4877    
4878          public com.google.protobuf.Descriptors.Descriptor
4879              getDescriptorForType() {
4880            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_HeartbeatResponseProto_descriptor;
4881          }
4882    
4883          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto getDefaultInstanceForType() {
4884            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto.getDefaultInstance();
4885          }
4886    
4887          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto build() {
4888            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto result = buildPartial();
4889            if (!result.isInitialized()) {
4890              throw newUninitializedMessageException(result);
4891            }
4892            return result;
4893          }
4894    
4895          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto buildPartial() {
4896            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto(this);
4897            onBuilt();
4898            return result;
4899          }
4900    
4901          public Builder mergeFrom(com.google.protobuf.Message other) {
4902            if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto) {
4903              return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto)other);
4904            } else {
4905              super.mergeFrom(other);
4906              return this;
4907            }
4908          }
4909    
4910          public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto other) {
4911            if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto.getDefaultInstance()) return this;
4912            this.mergeUnknownFields(other.getUnknownFields());
4913            return this;
4914          }
4915    
4916          public final boolean isInitialized() {
4917            return true;
4918          }
4919    
4920          public Builder mergeFrom(
4921              com.google.protobuf.CodedInputStream input,
4922              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
4923              throws java.io.IOException {
4924            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto parsedMessage = null;
4925            try {
4926              parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
4927            } catch (com.google.protobuf.InvalidProtocolBufferException e) {
4928              parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto) e.getUnfinishedMessage();
4929              throw e;
4930            } finally {
4931              if (parsedMessage != null) {
4932                mergeFrom(parsedMessage);
4933              }
4934            }
4935            return this;
4936          }
4937    
4938          // @@protoc_insertion_point(builder_scope:hadoop.hdfs.HeartbeatResponseProto)
4939        }
4940    
4941        static {
4942          defaultInstance = new HeartbeatResponseProto(true);
4943          defaultInstance.initFields();
4944        }
4945    
4946        // @@protoc_insertion_point(class_scope:hadoop.hdfs.HeartbeatResponseProto)
4947      }
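
          // Editorial usage sketch, not protoc output: HeartbeatResponseProto
          // carries no fields, so callers normally reuse the shared default
          // instance instead of building a fresh message. The method name is
          // illustrative only.
          private static HeartbeatResponseProto exampleHeartbeatResponse() {
            return HeartbeatResponseProto.getDefaultInstance();
          }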
4948    
4949      public interface StartLogSegmentRequestProtoOrBuilder
4950          extends com.google.protobuf.MessageOrBuilder {
4951    
4952        // required .hadoop.hdfs.RequestInfoProto reqInfo = 1;
4953        /**
4954         * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
4955         */
4956        boolean hasReqInfo();
4957        /**
4958         * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
4959         */
4960        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo();
4961        /**
4962         * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
4963         */
4964        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder();
4965    
4966        // required uint64 txid = 2;
4967        /**
4968         * <code>required uint64 txid = 2;</code>
4969         *
4970         * <pre>
4971         * Transaction ID
4972         * </pre>
4973         */
4974        boolean hasTxid();
4975        /**
4976         * <code>required uint64 txid = 2;</code>
4977         *
4978         * <pre>
4979         * Transaction ID
4980         * </pre>
4981         */
4982        long getTxid();
4983      }
4984      /**
4985       * Protobuf type {@code hadoop.hdfs.StartLogSegmentRequestProto}
4986       *
4987       * <pre>
4989       * startLogSegment()
4990       * </pre>
4991       */
4992      public static final class StartLogSegmentRequestProto extends
4993          com.google.protobuf.GeneratedMessage
4994          implements StartLogSegmentRequestProtoOrBuilder {
4995        // Use StartLogSegmentRequestProto.newBuilder() to construct.
4996        private StartLogSegmentRequestProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
4997          super(builder);
4998          this.unknownFields = builder.getUnknownFields();
4999        }
5000        private StartLogSegmentRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
5001    
5002        private static final StartLogSegmentRequestProto defaultInstance;
5003        public static StartLogSegmentRequestProto getDefaultInstance() {
5004          return defaultInstance;
5005        }
5006    
5007        public StartLogSegmentRequestProto getDefaultInstanceForType() {
5008          return defaultInstance;
5009        }
5010    
5011        private final com.google.protobuf.UnknownFieldSet unknownFields;
5012        @java.lang.Override
5013        public final com.google.protobuf.UnknownFieldSet
5014            getUnknownFields() {
5015          return this.unknownFields;
5016        }
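            // Editorial comment, not protoc output: the constructor below
            // parses the stream tag-by-tag. Tag 10 is reqInfo (field 1,
            // length-delimited) and tag 16 is txid (field 2, varint); tag 0
            // signals end-of-stream, and unrecognized tags are preserved in
            // unknownFields.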
5017        private StartLogSegmentRequestProto(
5018            com.google.protobuf.CodedInputStream input,
5019            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
5020            throws com.google.protobuf.InvalidProtocolBufferException {
5021          initFields();
5022          int mutable_bitField0_ = 0;
5023          com.google.protobuf.UnknownFieldSet.Builder unknownFields =
5024              com.google.protobuf.UnknownFieldSet.newBuilder();
5025          try {
5026            boolean done = false;
5027            while (!done) {
5028              int tag = input.readTag();
5029              switch (tag) {
5030                case 0:
5031                  done = true;
5032                  break;
5033                default: {
5034                  if (!parseUnknownField(input, unknownFields,
5035                                         extensionRegistry, tag)) {
5036                    done = true;
5037                  }
5038                  break;
5039                }
5040                case 10: {
5041                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder subBuilder = null;
5042                  if (((bitField0_ & 0x00000001) == 0x00000001)) {
5043                    subBuilder = reqInfo_.toBuilder();
5044                  }
5045                  reqInfo_ = input.readMessage(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.PARSER, extensionRegistry);
5046                  if (subBuilder != null) {
5047                    subBuilder.mergeFrom(reqInfo_);
5048                    reqInfo_ = subBuilder.buildPartial();
5049                  }
5050                  bitField0_ |= 0x00000001;
5051                  break;
5052                }
5053                case 16: {
5054                  bitField0_ |= 0x00000002;
5055                  txid_ = input.readUInt64();
5056                  break;
5057                }
5058              }
5059            }
5060          } catch (com.google.protobuf.InvalidProtocolBufferException e) {
5061            throw e.setUnfinishedMessage(this);
5062          } catch (java.io.IOException e) {
5063            throw new com.google.protobuf.InvalidProtocolBufferException(
5064                e.getMessage()).setUnfinishedMessage(this);
5065          } finally {
5066            this.unknownFields = unknownFields.build();
5067            makeExtensionsImmutable();
5068          }
5069        }
5070        public static final com.google.protobuf.Descriptors.Descriptor
5071            getDescriptor() {
5072          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_StartLogSegmentRequestProto_descriptor;
5073        }
5074    
5075        protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
5076            internalGetFieldAccessorTable() {
5077          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_StartLogSegmentRequestProto_fieldAccessorTable
5078              .ensureFieldAccessorsInitialized(
5079                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto.Builder.class);
5080        }
5081    
5082        public static com.google.protobuf.Parser<StartLogSegmentRequestProto> PARSER =
5083            new com.google.protobuf.AbstractParser<StartLogSegmentRequestProto>() {
5084          public StartLogSegmentRequestProto parsePartialFrom(
5085              com.google.protobuf.CodedInputStream input,
5086              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
5087              throws com.google.protobuf.InvalidProtocolBufferException {
5088            return new StartLogSegmentRequestProto(input, extensionRegistry);
5089          }
5090        };
5091    
5092        @java.lang.Override
5093        public com.google.protobuf.Parser<StartLogSegmentRequestProto> getParserForType() {
5094          return PARSER;
5095        }
5096    
5097        private int bitField0_;
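            // Editorial comment: bitField0_ records which fields were set;
            // bit 0x1 is reqInfo and bit 0x2 is txid.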
5098        // required .hadoop.hdfs.RequestInfoProto reqInfo = 1;
5099        public static final int REQINFO_FIELD_NUMBER = 1;
5100        private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto reqInfo_;
5101        /**
5102         * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
5103         */
5104        public boolean hasReqInfo() {
5105          return ((bitField0_ & 0x00000001) == 0x00000001);
5106        }
5107        /**
5108         * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
5109         */
5110        public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo() {
5111          return reqInfo_;
5112        }
5113        /**
5114         * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
5115         */
5116        public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder() {
5117          return reqInfo_;
5118        }
5119    
5120        // required uint64 txid = 2;
5121        public static final int TXID_FIELD_NUMBER = 2;
5122        private long txid_;
5123        /**
5124         * <code>required uint64 txid = 2;</code>
5125         *
5126         * <pre>
5127         * Transaction ID
5128         * </pre>
5129         */
5130        public boolean hasTxid() {
5131          return ((bitField0_ & 0x00000002) == 0x00000002);
5132        }
5133        /**
5134         * <code>required uint64 txid = 2;</code>
5135         *
5136         * <pre>
5137         * Transaction ID
5138         * </pre>
5139         */
5140        public long getTxid() {
5141          return txid_;
5142        }
5143    
5144        private void initFields() {
5145          reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
5146          txid_ = 0L;
5147        }
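            // Editorial comment: memoizedIsInitialized caches the result of
            // isInitialized(): -1 = not yet computed, 0 = a required field is
            // missing, 1 = fully initialized.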
5148        private byte memoizedIsInitialized = -1;
5149        public final boolean isInitialized() {
5150          byte isInitialized = memoizedIsInitialized;
5151          if (isInitialized != -1) return isInitialized == 1;
5152    
5153          if (!hasReqInfo()) {
5154            memoizedIsInitialized = 0;
5155            return false;
5156          }
5157          if (!hasTxid()) {
5158            memoizedIsInitialized = 0;
5159            return false;
5160          }
5161          if (!getReqInfo().isInitialized()) {
5162            memoizedIsInitialized = 0;
5163            return false;
5164          }
5165          memoizedIsInitialized = 1;
5166          return true;
5167        }
5168    
5169        public void writeTo(com.google.protobuf.CodedOutputStream output)
5170                            throws java.io.IOException {
5171          getSerializedSize();
5172          if (((bitField0_ & 0x00000001) == 0x00000001)) {
5173            output.writeMessage(1, reqInfo_);
5174          }
5175          if (((bitField0_ & 0x00000002) == 0x00000002)) {
5176            output.writeUInt64(2, txid_);
5177          }
5178          getUnknownFields().writeTo(output);
5179        }
5180    
5181        private int memoizedSerializedSize = -1;
5182        public int getSerializedSize() {
5183          int size = memoizedSerializedSize;
5184          if (size != -1) return size;
5185    
5186          size = 0;
5187          if (((bitField0_ & 0x00000001) == 0x00000001)) {
5188            size += com.google.protobuf.CodedOutputStream
5189              .computeMessageSize(1, reqInfo_);
5190          }
5191          if (((bitField0_ & 0x00000002) == 0x00000002)) {
5192            size += com.google.protobuf.CodedOutputStream
5193              .computeUInt64Size(2, txid_);
5194          }
5195          size += getUnknownFields().getSerializedSize();
5196          memoizedSerializedSize = size;
5197          return size;
5198        }
5199    
5200        private static final long serialVersionUID = 0L;
5201        @java.lang.Override
5202        protected java.lang.Object writeReplace()
5203            throws java.io.ObjectStreamException {
5204          return super.writeReplace();
5205        }
5206    
5207        @java.lang.Override
5208        public boolean equals(final java.lang.Object obj) {
5209          if (obj == this) {
5210            return true;
5211          }
5212          if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto)) {
5213            return super.equals(obj);
5214          }
5215          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto) obj;
5216    
5217          boolean result = true;
5218          result = result && (hasReqInfo() == other.hasReqInfo());
5219          if (hasReqInfo()) {
5220            result = result && getReqInfo()
5221                .equals(other.getReqInfo());
5222          }
5223          result = result && (hasTxid() == other.hasTxid());
5224          if (hasTxid()) {
5225            result = result && (getTxid()
5226                == other.getTxid());
5227          }
5228          result = result &&
5229              getUnknownFields().equals(other.getUnknownFields());
5230          return result;
5231        }
5232    
5233        private int memoizedHashCode = 0;
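            // Editorial comment: memoizedHashCode caches the computed hash,
            // with 0 doubling as the "not yet computed" sentinel.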
5234        @java.lang.Override
5235        public int hashCode() {
5236          if (memoizedHashCode != 0) {
5237            return memoizedHashCode;
5238          }
5239          int hash = 41;
5240          hash = (19 * hash) + getDescriptorForType().hashCode();
5241          if (hasReqInfo()) {
5242            hash = (37 * hash) + REQINFO_FIELD_NUMBER;
5243            hash = (53 * hash) + getReqInfo().hashCode();
5244          }
5245          if (hasTxid()) {
5246            hash = (37 * hash) + TXID_FIELD_NUMBER;
5247            hash = (53 * hash) + hashLong(getTxid());
5248          }
5249          hash = (29 * hash) + getUnknownFields().hashCode();
5250          memoizedHashCode = hash;
5251          return hash;
5252        }
5253    
5254        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto parseFrom(
5255            com.google.protobuf.ByteString data)
5256            throws com.google.protobuf.InvalidProtocolBufferException {
5257          return PARSER.parseFrom(data);
5258        }
5259        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto parseFrom(
5260            com.google.protobuf.ByteString data,
5261            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
5262            throws com.google.protobuf.InvalidProtocolBufferException {
5263          return PARSER.parseFrom(data, extensionRegistry);
5264        }
5265        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto parseFrom(byte[] data)
5266            throws com.google.protobuf.InvalidProtocolBufferException {
5267          return PARSER.parseFrom(data);
5268        }
5269        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto parseFrom(
5270            byte[] data,
5271            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
5272            throws com.google.protobuf.InvalidProtocolBufferException {
5273          return PARSER.parseFrom(data, extensionRegistry);
5274        }
5275        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto parseFrom(java.io.InputStream input)
5276            throws java.io.IOException {
5277          return PARSER.parseFrom(input);
5278        }
5279        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto parseFrom(
5280            java.io.InputStream input,
5281            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
5282            throws java.io.IOException {
5283          return PARSER.parseFrom(input, extensionRegistry);
5284        }
5285        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto parseDelimitedFrom(java.io.InputStream input)
5286            throws java.io.IOException {
5287          return PARSER.parseDelimitedFrom(input);
5288        }
5289        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto parseDelimitedFrom(
5290            java.io.InputStream input,
5291            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
5292            throws java.io.IOException {
5293          return PARSER.parseDelimitedFrom(input, extensionRegistry);
5294        }
5295        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto parseFrom(
5296            com.google.protobuf.CodedInputStream input)
5297            throws java.io.IOException {
5298          return PARSER.parseFrom(input);
5299        }
5300        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto parseFrom(
5301            com.google.protobuf.CodedInputStream input,
5302            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
5303            throws java.io.IOException {
5304          return PARSER.parseFrom(input, extensionRegistry);
5305        }
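
            // Editorial sketch, not generated code: round-trips a request
            // through its wire form using the factory methods above; parseFrom
            // throws InvalidProtocolBufferException on malformed bytes or
            // missing required fields. The method name is illustrative only.
            private static StartLogSegmentRequestProto exampleRoundTrip(
                StartLogSegmentRequestProto req)
                throws com.google.protobuf.InvalidProtocolBufferException {
              byte[] bytes = req.toByteArray();
              return parseFrom(bytes);
            }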
5306    
5307        public static Builder newBuilder() { return Builder.create(); }
5308        public Builder newBuilderForType() { return newBuilder(); }
5309        public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto prototype) {
5310          return newBuilder().mergeFrom(prototype);
5311        }
5312        public Builder toBuilder() { return newBuilder(this); }
5313    
5314        @java.lang.Override
5315        protected Builder newBuilderForType(
5316            com.google.protobuf.GeneratedMessage.BuilderParent parent) {
5317          Builder builder = new Builder(parent);
5318          return builder;
5319        }
5320        /**
5321         * Protobuf type {@code hadoop.hdfs.StartLogSegmentRequestProto}
5322         *
5323         * <pre>
5325         * startLogSegment()
5326         * </pre>
5327         */
5328        public static final class Builder extends
5329            com.google.protobuf.GeneratedMessage.Builder<Builder>
5330           implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProtoOrBuilder {
5331          public static final com.google.protobuf.Descriptors.Descriptor
5332              getDescriptor() {
5333            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_StartLogSegmentRequestProto_descriptor;
5334          }
5335    
5336          protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
5337              internalGetFieldAccessorTable() {
5338            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_StartLogSegmentRequestProto_fieldAccessorTable
5339                .ensureFieldAccessorsInitialized(
5340                    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto.Builder.class);
5341          }
5342    
5343          // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto.newBuilder()
5344          private Builder() {
5345            maybeForceBuilderInitialization();
5346          }
5347    
5348          private Builder(
5349              com.google.protobuf.GeneratedMessage.BuilderParent parent) {
5350            super(parent);
5351            maybeForceBuilderInitialization();
5352          }
5353          private void maybeForceBuilderInitialization() {
5354            if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
5355              getReqInfoFieldBuilder();
5356            }
5357          }
5358          private static Builder create() {
5359            return new Builder();
5360          }
5361    
5362          public Builder clear() {
5363            super.clear();
5364            if (reqInfoBuilder_ == null) {
5365              reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
5366            } else {
5367              reqInfoBuilder_.clear();
5368            }
5369            bitField0_ = (bitField0_ & ~0x00000001);
5370            txid_ = 0L;
5371            bitField0_ = (bitField0_ & ~0x00000002);
5372            return this;
5373          }
5374    
5375          public Builder clone() {
5376            return create().mergeFrom(buildPartial());
5377          }
5378    
5379          public com.google.protobuf.Descriptors.Descriptor
5380              getDescriptorForType() {
5381            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_StartLogSegmentRequestProto_descriptor;
5382          }
5383    
5384          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto getDefaultInstanceForType() {
5385            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto.getDefaultInstance();
5386          }
5387    
5388          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto build() {
5389            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto result = buildPartial();
5390            if (!result.isInitialized()) {
5391              throw newUninitializedMessageException(result);
5392            }
5393            return result;
5394          }
5395    
5396          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto buildPartial() {
5397            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto(this);
5398            int from_bitField0_ = bitField0_;
5399            int to_bitField0_ = 0;
5400            if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
5401              to_bitField0_ |= 0x00000001;
5402            }
5403            if (reqInfoBuilder_ == null) {
5404              result.reqInfo_ = reqInfo_;
5405            } else {
5406              result.reqInfo_ = reqInfoBuilder_.build();
5407            }
5408            if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
5409              to_bitField0_ |= 0x00000002;
5410            }
5411            result.txid_ = txid_;
5412            result.bitField0_ = to_bitField0_;
5413            onBuilt();
5414            return result;
5415          }
5416    
5417          public Builder mergeFrom(com.google.protobuf.Message other) {
5418            if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto) {
5419              return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto)other);
5420            } else {
5421              super.mergeFrom(other);
5422              return this;
5423            }
5424          }
5425    
5426          public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto other) {
5427            if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto.getDefaultInstance()) return this;
5428            if (other.hasReqInfo()) {
5429              mergeReqInfo(other.getReqInfo());
5430            }
5431            if (other.hasTxid()) {
5432              setTxid(other.getTxid());
5433            }
5434            this.mergeUnknownFields(other.getUnknownFields());
5435            return this;
5436          }
5437    
5438          public final boolean isInitialized() {
5439            if (!hasReqInfo()) {
5441              return false;
5442            }
5443            if (!hasTxid()) {
5445              return false;
5446            }
5447            if (!getReqInfo().isInitialized()) {
5449              return false;
5450            }
5451            return true;
5452          }
5453    
5454          public Builder mergeFrom(
5455              com.google.protobuf.CodedInputStream input,
5456              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
5457              throws java.io.IOException {
5458            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto parsedMessage = null;
5459            try {
5460              parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
5461            } catch (com.google.protobuf.InvalidProtocolBufferException e) {
5462              parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto) e.getUnfinishedMessage();
5463              throw e;
5464            } finally {
5465              if (parsedMessage != null) {
5466                mergeFrom(parsedMessage);
5467              }
5468            }
5469            return this;
5470          }
5471          private int bitField0_;
5472    
5473          // required .hadoop.hdfs.RequestInfoProto reqInfo = 1;
5474          private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
5475          private com.google.protobuf.SingleFieldBuilder<
5476              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder> reqInfoBuilder_;
5477          /**
5478           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
5479           */
5480          public boolean hasReqInfo() {
5481            return ((bitField0_ & 0x00000001) == 0x00000001);
5482          }
5483          /**
5484           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
5485           */
5486          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo() {
5487            if (reqInfoBuilder_ == null) {
5488              return reqInfo_;
5489            } else {
5490              return reqInfoBuilder_.getMessage();
5491            }
5492          }
5493          /**
5494           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
5495           */
5496          public Builder setReqInfo(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto value) {
5497            if (reqInfoBuilder_ == null) {
5498              if (value == null) {
5499                throw new NullPointerException();
5500              }
5501              reqInfo_ = value;
5502              onChanged();
5503            } else {
5504              reqInfoBuilder_.setMessage(value);
5505            }
5506            bitField0_ |= 0x00000001;
5507            return this;
5508          }
5509          /**
5510           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
5511           */
5512          public Builder setReqInfo(
5513              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder builderForValue) {
5514            if (reqInfoBuilder_ == null) {
5515              reqInfo_ = builderForValue.build();
5516              onChanged();
5517            } else {
5518              reqInfoBuilder_.setMessage(builderForValue.build());
5519            }
5520            bitField0_ |= 0x00000001;
5521            return this;
5522          }
5523          /**
5524           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
5525           */
5526          public Builder mergeReqInfo(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto value) {
5527            if (reqInfoBuilder_ == null) {
5528              if (((bitField0_ & 0x00000001) == 0x00000001) &&
5529                  reqInfo_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance()) {
5530                reqInfo_ =
5531                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.newBuilder(reqInfo_).mergeFrom(value).buildPartial();
5532              } else {
5533                reqInfo_ = value;
5534              }
5535              onChanged();
5536            } else {
5537              reqInfoBuilder_.mergeFrom(value);
5538            }
5539            bitField0_ |= 0x00000001;
5540            return this;
5541          }
5542          /**
5543           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
5544           */
5545          public Builder clearReqInfo() {
5546            if (reqInfoBuilder_ == null) {
5547              reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
5548              onChanged();
5549            } else {
5550              reqInfoBuilder_.clear();
5551            }
5552            bitField0_ = (bitField0_ & ~0x00000001);
5553            return this;
5554          }
5555          /**
5556           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
5557           */
5558          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder getReqInfoBuilder() {
5559            bitField0_ |= 0x00000001;
5560            onChanged();
5561            return getReqInfoFieldBuilder().getBuilder();
5562          }
5563          /**
5564           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
5565           */
5566          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder() {
5567            if (reqInfoBuilder_ != null) {
5568              return reqInfoBuilder_.getMessageOrBuilder();
5569            } else {
5570              return reqInfo_;
5571            }
5572          }
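              // Editorial comment: reqInfo_ and reqInfoBuilder_ are mutually
              // exclusive storage. getReqInfoFieldBuilder() below creates the
              // SingleFieldBuilder lazily, nulls out reqInfo_, and from then
              // on all reads and writes go through the builder.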
5573          /**
5574           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
5575           */
5576          private com.google.protobuf.SingleFieldBuilder<
5577              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder> 
5578              getReqInfoFieldBuilder() {
5579            if (reqInfoBuilder_ == null) {
5580              reqInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder<
5581                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder>(
5582                      reqInfo_,
5583                      getParentForChildren(),
5584                      isClean());
5585              reqInfo_ = null;
5586            }
5587            return reqInfoBuilder_;
5588          }
5589    
5590          // required uint64 txid = 2;
5591          private long txid_;
5592          /**
5593           * <code>required uint64 txid = 2;</code>
5594           *
5595           * <pre>
5596           * Transaction ID
5597           * </pre>
5598           */
5599          public boolean hasTxid() {
5600            return ((bitField0_ & 0x00000002) == 0x00000002);
5601          }
5602          /**
5603           * <code>required uint64 txid = 2;</code>
5604           *
5605           * <pre>
5606           * Transaction ID
5607           * </pre>
5608           */
5609          public long getTxid() {
5610            return txid_;
5611          }
5612          /**
5613           * <code>required uint64 txid = 2;</code>
5614           *
5615           * <pre>
5616           * Transaction ID
5617           * </pre>
5618           */
5619          public Builder setTxid(long value) {
5620            bitField0_ |= 0x00000002;
5621            txid_ = value;
5622            onChanged();
5623            return this;
5624          }
5625          /**
5626           * <code>required uint64 txid = 2;</code>
5627           *
5628           * <pre>
5629           * Transaction ID
5630           * </pre>
5631           */
5632          public Builder clearTxid() {
5633            bitField0_ = (bitField0_ & ~0x00000002);
5634            txid_ = 0L;
5635            onChanged();
5636            return this;
5637          }
5638    
5639          // @@protoc_insertion_point(builder_scope:hadoop.hdfs.StartLogSegmentRequestProto)
5640        }
5641    
5642        static {
5643          defaultInstance = new StartLogSegmentRequestProto(true);
5644          defaultInstance.initFields();
5645        }
5646    
5647        // @@protoc_insertion_point(class_scope:hadoop.hdfs.StartLogSegmentRequestProto)
5648      }
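
          // Editorial usage sketch, not protoc output: assembling a request
          // with the generated Builder. reqInfo and txid are caller-supplied;
          // build() verifies both required fields are set and throws an
          // UninitializedMessageException otherwise.
          private static StartLogSegmentRequestProto exampleStartLogSegmentRequest(
              RequestInfoProto reqInfo, long txid) {
            return StartLogSegmentRequestProto.newBuilder()
                .setReqInfo(reqInfo)
                .setTxid(txid)
                .build();
          }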
5649    
5650      public interface StartLogSegmentResponseProtoOrBuilder
5651          extends com.google.protobuf.MessageOrBuilder {
5652      }
5653      /**
5654       * Protobuf type {@code hadoop.hdfs.StartLogSegmentResponseProto}
5655       */
5656      public static final class StartLogSegmentResponseProto extends
5657          com.google.protobuf.GeneratedMessage
5658          implements StartLogSegmentResponseProtoOrBuilder {
5659        // Use StartLogSegmentResponseProto.newBuilder() to construct.
5660        private StartLogSegmentResponseProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
5661          super(builder);
5662          this.unknownFields = builder.getUnknownFields();
5663        }
5664        private StartLogSegmentResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
5665    
5666        private static final StartLogSegmentResponseProto defaultInstance;
5667        public static StartLogSegmentResponseProto getDefaultInstance() {
5668          return defaultInstance;
5669        }
5670    
5671        public StartLogSegmentResponseProto getDefaultInstanceForType() {
5672          return defaultInstance;
5673        }
5674    
5675        private final com.google.protobuf.UnknownFieldSet unknownFields;
5676        @java.lang.Override
5677        public final com.google.protobuf.UnknownFieldSet
5678            getUnknownFields() {
5679          return this.unknownFields;
5680        }
5681        private StartLogSegmentResponseProto(
5682            com.google.protobuf.CodedInputStream input,
5683            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
5684            throws com.google.protobuf.InvalidProtocolBufferException {
5685          initFields();
5686          com.google.protobuf.UnknownFieldSet.Builder unknownFields =
5687              com.google.protobuf.UnknownFieldSet.newBuilder();
5688          try {
5689            boolean done = false;
5690            while (!done) {
5691              int tag = input.readTag();
5692              switch (tag) {
5693                case 0:
5694                  done = true;
5695                  break;
5696                default: {
5697                  if (!parseUnknownField(input, unknownFields,
5698                                         extensionRegistry, tag)) {
5699                    done = true;
5700                  }
5701                  break;
5702                }
5703              }
5704            }
5705          } catch (com.google.protobuf.InvalidProtocolBufferException e) {
5706            throw e.setUnfinishedMessage(this);
5707          } catch (java.io.IOException e) {
5708            throw new com.google.protobuf.InvalidProtocolBufferException(
5709                e.getMessage()).setUnfinishedMessage(this);
5710          } finally {
5711            this.unknownFields = unknownFields.build();
5712            makeExtensionsImmutable();
5713          }
5714        }
5715        public static final com.google.protobuf.Descriptors.Descriptor
5716            getDescriptor() {
5717          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_StartLogSegmentResponseProto_descriptor;
5718        }
5719    
5720        protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
5721            internalGetFieldAccessorTable() {
5722          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_StartLogSegmentResponseProto_fieldAccessorTable
5723              .ensureFieldAccessorsInitialized(
5724                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto.Builder.class);
5725        }
5726    
5727        public static com.google.protobuf.Parser<StartLogSegmentResponseProto> PARSER =
5728            new com.google.protobuf.AbstractParser<StartLogSegmentResponseProto>() {
5729          public StartLogSegmentResponseProto parsePartialFrom(
5730              com.google.protobuf.CodedInputStream input,
5731              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
5732              throws com.google.protobuf.InvalidProtocolBufferException {
5733            return new StartLogSegmentResponseProto(input, extensionRegistry);
5734          }
5735        };
5736    
5737        @java.lang.Override
5738        public com.google.protobuf.Parser<StartLogSegmentResponseProto> getParserForType() {
5739          return PARSER;
5740        }
5741    
5742        private void initFields() {
5743        }
5744        private byte memoizedIsInitialized = -1;
5745        public final boolean isInitialized() {
5746          byte isInitialized = memoizedIsInitialized;
5747          if (isInitialized != -1) return isInitialized == 1;
5748    
5749          memoizedIsInitialized = 1;
5750          return true;
5751        }
5752    
5753        public void writeTo(com.google.protobuf.CodedOutputStream output)
5754                            throws java.io.IOException {
5755          getSerializedSize();
5756          getUnknownFields().writeTo(output);
5757        }
5758    
5759        private int memoizedSerializedSize = -1;
5760        public int getSerializedSize() {
5761          int size = memoizedSerializedSize;
5762          if (size != -1) return size;
5763    
5764          size = 0;
5765          size += getUnknownFields().getSerializedSize();
5766          memoizedSerializedSize = size;
5767          return size;
5768        }
5769    
5770        private static final long serialVersionUID = 0L;
5771        @java.lang.Override
5772        protected java.lang.Object writeReplace()
5773            throws java.io.ObjectStreamException {
5774          return super.writeReplace();
5775        }
5776    
5777        @java.lang.Override
5778        public boolean equals(final java.lang.Object obj) {
5779          if (obj == this) {
5780            return true;
5781          }
5782          if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto)) {
5783            return super.equals(obj);
5784          }
5785          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto) obj;
5786    
5787          boolean result = true;
5788          result = result &&
5789              getUnknownFields().equals(other.getUnknownFields());
5790          return result;
5791        }
5792    
5793        private int memoizedHashCode = 0;
5794        @java.lang.Override
5795        public int hashCode() {
5796          if (memoizedHashCode != 0) {
5797            return memoizedHashCode;
5798          }
5799          int hash = 41;
5800          hash = (19 * hash) + getDescriptorForType().hashCode();
5801          hash = (29 * hash) + getUnknownFields().hashCode();
5802          memoizedHashCode = hash;
5803          return hash;
5804        }
5805    
5806        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto parseFrom(
5807            com.google.protobuf.ByteString data)
5808            throws com.google.protobuf.InvalidProtocolBufferException {
5809          return PARSER.parseFrom(data);
5810        }
5811        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto parseFrom(
5812            com.google.protobuf.ByteString data,
5813            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
5814            throws com.google.protobuf.InvalidProtocolBufferException {
5815          return PARSER.parseFrom(data, extensionRegistry);
5816        }
5817        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto parseFrom(byte[] data)
5818            throws com.google.protobuf.InvalidProtocolBufferException {
5819          return PARSER.parseFrom(data);
5820        }
5821        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto parseFrom(
5822            byte[] data,
5823            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
5824            throws com.google.protobuf.InvalidProtocolBufferException {
5825          return PARSER.parseFrom(data, extensionRegistry);
5826        }
5827        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto parseFrom(java.io.InputStream input)
5828            throws java.io.IOException {
5829          return PARSER.parseFrom(input);
5830        }
5831        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto parseFrom(
5832            java.io.InputStream input,
5833            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
5834            throws java.io.IOException {
5835          return PARSER.parseFrom(input, extensionRegistry);
5836        }
5837        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto parseDelimitedFrom(java.io.InputStream input)
5838            throws java.io.IOException {
5839          return PARSER.parseDelimitedFrom(input);
5840        }
5841        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto parseDelimitedFrom(
5842            java.io.InputStream input,
5843            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
5844            throws java.io.IOException {
5845          return PARSER.parseDelimitedFrom(input, extensionRegistry);
5846        }
5847        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto parseFrom(
5848            com.google.protobuf.CodedInputStream input)
5849            throws java.io.IOException {
5850          return PARSER.parseFrom(input);
5851        }
5852        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto parseFrom(
5853            com.google.protobuf.CodedInputStream input,
5854            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
5855            throws java.io.IOException {
5856          return PARSER.parseFrom(input, extensionRegistry);
5857        }
5858    
5859        public static Builder newBuilder() { return Builder.create(); }
5860        public Builder newBuilderForType() { return newBuilder(); }
5861        public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto prototype) {
5862          return newBuilder().mergeFrom(prototype);
5863        }
5864        public Builder toBuilder() { return newBuilder(this); }
5865    
5866        @java.lang.Override
5867        protected Builder newBuilderForType(
5868            com.google.protobuf.GeneratedMessage.BuilderParent parent) {
5869          Builder builder = new Builder(parent);
5870          return builder;
5871        }
5872        /**
5873         * Protobuf type {@code hadoop.hdfs.StartLogSegmentResponseProto}
5874         */
5875        public static final class Builder extends
5876            com.google.protobuf.GeneratedMessage.Builder<Builder>
5877           implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProtoOrBuilder {
5878          public static final com.google.protobuf.Descriptors.Descriptor
5879              getDescriptor() {
5880            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_StartLogSegmentResponseProto_descriptor;
5881          }
5882    
5883          protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
5884              internalGetFieldAccessorTable() {
5885            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_StartLogSegmentResponseProto_fieldAccessorTable
5886                .ensureFieldAccessorsInitialized(
5887                    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto.Builder.class);
5888          }
5889    
5890          // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto.newBuilder()
5891          private Builder() {
5892            maybeForceBuilderInitialization();
5893          }
5894    
5895          private Builder(
5896              com.google.protobuf.GeneratedMessage.BuilderParent parent) {
5897            super(parent);
5898            maybeForceBuilderInitialization();
5899          }
5900          private void maybeForceBuilderInitialization() {
5901            if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
5902            }
5903          }
5904          private static Builder create() {
5905            return new Builder();
5906          }
5907    
5908          public Builder clear() {
5909            super.clear();
5910            return this;
5911          }
5912    
5913          public Builder clone() {
5914            return create().mergeFrom(buildPartial());
5915          }
5916    
5917          public com.google.protobuf.Descriptors.Descriptor
5918              getDescriptorForType() {
5919            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_StartLogSegmentResponseProto_descriptor;
5920          }
5921    
5922          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto getDefaultInstanceForType() {
5923            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto.getDefaultInstance();
5924          }
5925    
5926          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto build() {
5927            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto result = buildPartial();
5928            if (!result.isInitialized()) {
5929              throw newUninitializedMessageException(result);
5930            }
5931            return result;
5932          }
5933    
5934          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto buildPartial() {
5935            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto(this);
5936            onBuilt();
5937            return result;
5938          }
5939    
5940          public Builder mergeFrom(com.google.protobuf.Message other) {
5941            if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto) {
5942              return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto)other);
5943            } else {
5944              super.mergeFrom(other);
5945              return this;
5946            }
5947          }
5948    
5949          public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto other) {
5950            if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto.getDefaultInstance()) return this;
5951            this.mergeUnknownFields(other.getUnknownFields());
5952            return this;
5953          }
5954    
5955          public final boolean isInitialized() {
5956            return true;
5957          }
5958    
5959          public Builder mergeFrom(
5960              com.google.protobuf.CodedInputStream input,
5961              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
5962              throws java.io.IOException {
5963            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto parsedMessage = null;
5964            try {
5965              parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
5966            } catch (com.google.protobuf.InvalidProtocolBufferException e) {
5967              parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto) e.getUnfinishedMessage();
5968              throw e;
5969            } finally {
5970              if (parsedMessage != null) {
5971                mergeFrom(parsedMessage);
5972              }
5973            }
5974            return this;
5975          }
5976    
5977          // @@protoc_insertion_point(builder_scope:hadoop.hdfs.StartLogSegmentResponseProto)
5978        }
5979    
5980        static {
5981          defaultInstance = new StartLogSegmentResponseProto(true);
5982          defaultInstance.initFields();
5983        }
5984    
5985        // @@protoc_insertion_point(class_scope:hadoop.hdfs.StartLogSegmentResponseProto)
5986      }
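
          // Editorial note: like HeartbeatResponseProto above, this response
          // carries no payload; it serves as a typed acknowledgement for
          // startLogSegment().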
5987    
5988      public interface FinalizeLogSegmentRequestProtoOrBuilder
5989          extends com.google.protobuf.MessageOrBuilder {
5990    
5991        // required .hadoop.hdfs.RequestInfoProto reqInfo = 1;
5992        /**
5993         * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
5994         */
5995        boolean hasReqInfo();
5996        /**
5997         * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
5998         */
5999        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo();
6000        /**
6001         * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
6002         */
6003        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder();
6004    
6005        // required uint64 startTxId = 2;
6006        /**
6007         * <code>required uint64 startTxId = 2;</code>
6008         */
6009        boolean hasStartTxId();
6010        /**
6011         * <code>required uint64 startTxId = 2;</code>
6012         */
6013        long getStartTxId();
6014    
6015        // required uint64 endTxId = 3;
6016        /**
6017         * <code>required uint64 endTxId = 3;</code>
6018         */
6019        boolean hasEndTxId();
6020        /**
6021         * <code>required uint64 endTxId = 3;</code>
6022         */
6023        long getEndTxId();
6024      }
6025      /**
6026       * Protobuf type {@code hadoop.hdfs.FinalizeLogSegmentRequestProto}
6027       *
6028       * <pre>
6030       * finalizeLogSegment()
6031       * </pre>
6032       */
6033      public static final class FinalizeLogSegmentRequestProto extends
6034          com.google.protobuf.GeneratedMessage
6035          implements FinalizeLogSegmentRequestProtoOrBuilder {
6036        // Use FinalizeLogSegmentRequestProto.newBuilder() to construct.
6037        private FinalizeLogSegmentRequestProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
6038          super(builder);
6039          this.unknownFields = builder.getUnknownFields();
6040        }
6041        private FinalizeLogSegmentRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
6042    
6043        private static final FinalizeLogSegmentRequestProto defaultInstance;
6044        public static FinalizeLogSegmentRequestProto getDefaultInstance() {
6045          return defaultInstance;
6046        }
6047    
6048        public FinalizeLogSegmentRequestProto getDefaultInstanceForType() {
6049          return defaultInstance;
6050        }
6051    
6052        private final com.google.protobuf.UnknownFieldSet unknownFields;
6053        @java.lang.Override
6054        public final com.google.protobuf.UnknownFieldSet
6055            getUnknownFields() {
6056          return this.unknownFields;
6057        }
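            // Editorial comment, not protoc output: in the parsing constructor
            // below, tag 10 is reqInfo (field 1, length-delimited), tag 16 is
            // startTxId (field 2, varint), and tag 24 is endTxId (field 3,
            // varint).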
6058        private FinalizeLogSegmentRequestProto(
6059            com.google.protobuf.CodedInputStream input,
6060            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
6061            throws com.google.protobuf.InvalidProtocolBufferException {
6062          initFields();
6063          int mutable_bitField0_ = 0;
6064          com.google.protobuf.UnknownFieldSet.Builder unknownFields =
6065              com.google.protobuf.UnknownFieldSet.newBuilder();
6066          try {
6067            boolean done = false;
6068            while (!done) {
6069              int tag = input.readTag();
6070              switch (tag) {
            case 0:
              done = true;
              break;
            default: {
              if (!parseUnknownField(input, unknownFields,
                                     extensionRegistry, tag)) {
                done = true;
              }
              break;
            }
            case 10: {
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder subBuilder = null;
              if (((bitField0_ & 0x00000001) == 0x00000001)) {
                subBuilder = reqInfo_.toBuilder();
              }
              reqInfo_ = input.readMessage(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.PARSER, extensionRegistry);
              if (subBuilder != null) {
                subBuilder.mergeFrom(reqInfo_);
                reqInfo_ = subBuilder.buildPartial();
              }
              bitField0_ |= 0x00000001;
              break;
            }
            case 16: {
              bitField0_ |= 0x00000002;
              startTxId_ = input.readUInt64();
              break;
            }
            case 24: {
              bitField0_ |= 0x00000004;
              endTxId_ = input.readUInt64();
              break;
            }
          }
        }
      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
        throw e.setUnfinishedMessage(this);
      } catch (java.io.IOException e) {
        throw new com.google.protobuf.InvalidProtocolBufferException(
            e.getMessage()).setUnfinishedMessage(this);
      } finally {
        this.unknownFields = unknownFields.build();
        makeExtensionsImmutable();
      }
    }
    public static final com.google.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_FinalizeLogSegmentRequestProto_descriptor;
    }

    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_FinalizeLogSegmentRequestProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto.Builder.class);
    }

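    // Stream parser backing the static parseFrom()/parseDelimitedFrom()
    // overloads below and getParserForType().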
    public static com.google.protobuf.Parser<FinalizeLogSegmentRequestProto> PARSER =
        new com.google.protobuf.AbstractParser<FinalizeLogSegmentRequestProto>() {
      public FinalizeLogSegmentRequestProto parsePartialFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws com.google.protobuf.InvalidProtocolBufferException {
        return new FinalizeLogSegmentRequestProto(input, extensionRegistry);
      }
    };

    @java.lang.Override
    public com.google.protobuf.Parser<FinalizeLogSegmentRequestProto> getParserForType() {
      return PARSER;
    }

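    // Tracks which fields were explicitly set: bit 0x1 = reqInfo,
    // 0x2 = startTxId, 0x4 = endTxId (standard protobuf 2.x has-bits).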
    private int bitField0_;
    // required .hadoop.hdfs.RequestInfoProto reqInfo = 1;
    public static final int REQINFO_FIELD_NUMBER = 1;
    private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto reqInfo_;
    /**
     * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
     */
    public boolean hasReqInfo() {
      return ((bitField0_ & 0x00000001) == 0x00000001);
    }
    /**
     * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
     */
    public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo() {
      return reqInfo_;
    }
    /**
     * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
     */
    public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder() {
      return reqInfo_;
    }

    // required uint64 startTxId = 2;
    public static final int STARTTXID_FIELD_NUMBER = 2;
    private long startTxId_;
    /**
     * <code>required uint64 startTxId = 2;</code>
     */
    public boolean hasStartTxId() {
      return ((bitField0_ & 0x00000002) == 0x00000002);
    }
    /**
     * <code>required uint64 startTxId = 2;</code>
     */
    public long getStartTxId() {
      return startTxId_;
    }

    // required uint64 endTxId = 3;
    public static final int ENDTXID_FIELD_NUMBER = 3;
    private long endTxId_;
    /**
     * <code>required uint64 endTxId = 3;</code>
     */
    public boolean hasEndTxId() {
      return ((bitField0_ & 0x00000004) == 0x00000004);
    }
    /**
     * <code>required uint64 endTxId = 3;</code>
     */
    public long getEndTxId() {
      return endTxId_;
    }

    private void initFields() {
      reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
      startTxId_ = 0L;
      endTxId_ = 0L;
    }
    private byte memoizedIsInitialized = -1;
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized != -1) return isInitialized == 1;

      if (!hasReqInfo()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasStartTxId()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasEndTxId()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!getReqInfo().isInitialized()) {
        memoizedIsInitialized = 0;
        return false;
      }
      memoizedIsInitialized = 1;
      return true;
    }

    public void writeTo(com.google.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      getSerializedSize();
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        output.writeMessage(1, reqInfo_);
      }
      if (((bitField0_ & 0x00000002) == 0x00000002)) {
        output.writeUInt64(2, startTxId_);
      }
      if (((bitField0_ & 0x00000004) == 0x00000004)) {
        output.writeUInt64(3, endTxId_);
      }
      getUnknownFields().writeTo(output);
    }

    private int memoizedSerializedSize = -1;
    public int getSerializedSize() {
      int size = memoizedSerializedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        size += com.google.protobuf.CodedOutputStream
          .computeMessageSize(1, reqInfo_);
      }
      if (((bitField0_ & 0x00000002) == 0x00000002)) {
        size += com.google.protobuf.CodedOutputStream
          .computeUInt64Size(2, startTxId_);
      }
      if (((bitField0_ & 0x00000004) == 0x00000004)) {
        size += com.google.protobuf.CodedOutputStream
          .computeUInt64Size(3, endTxId_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSerializedSize = size;
      return size;
    }

    private static final long serialVersionUID = 0L;
    @java.lang.Override
    protected java.lang.Object writeReplace()
        throws java.io.ObjectStreamException {
      return super.writeReplace();
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto) obj;

      boolean result = true;
      result = result && (hasReqInfo() == other.hasReqInfo());
      if (hasReqInfo()) {
        result = result && getReqInfo()
            .equals(other.getReqInfo());
      }
      result = result && (hasStartTxId() == other.hasStartTxId());
      if (hasStartTxId()) {
        result = result && (getStartTxId()
            == other.getStartTxId());
      }
      result = result && (hasEndTxId() == other.hasEndTxId());
      if (hasEndTxId()) {
        result = result && (getEndTxId()
            == other.getEndTxId());
      }
      result = result &&
          getUnknownFields().equals(other.getUnknownFields());
      return result;
    }

    private int memoizedHashCode = 0;
    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptorForType().hashCode();
      if (hasReqInfo()) {
        hash = (37 * hash) + REQINFO_FIELD_NUMBER;
        hash = (53 * hash) + getReqInfo().hashCode();
      }
      if (hasStartTxId()) {
        hash = (37 * hash) + STARTTXID_FIELD_NUMBER;
        hash = (53 * hash) + hashLong(getStartTxId());
      }
      if (hasEndTxId()) {
        hash = (37 * hash) + ENDTXID_FIELD_NUMBER;
        hash = (53 * hash) + hashLong(getEndTxId());
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto parseFrom(
        com.google.protobuf.ByteString data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto parseFrom(
        com.google.protobuf.ByteString data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto parseFrom(byte[] data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto parseFrom(
        byte[] data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto parseFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto parseDelimitedFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto parseFrom(
        com.google.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto parseFrom(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }

    public static Builder newBuilder() { return Builder.create(); }
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto prototype) {
      return newBuilder().mergeFrom(prototype);
    }
    public Builder toBuilder() { return newBuilder(this); }

    @java.lang.Override
    protected Builder newBuilderForType(
        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * Protobuf type {@code hadoop.hdfs.FinalizeLogSegmentRequestProto}
     *
     * <pre>
     **
     * finalizeLogSegment()
     * </pre>
     */
    public static final class Builder extends
        com.google.protobuf.GeneratedMessage.Builder<Builder>
       implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProtoOrBuilder {
      public static final com.google.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_FinalizeLogSegmentRequestProto_descriptor;
      }

      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_FinalizeLogSegmentRequestProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }

      private Builder(
          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      private void maybeForceBuilderInitialization() {
        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
          getReqInfoFieldBuilder();
        }
      }
      private static Builder create() {
        return new Builder();
      }

      public Builder clear() {
        super.clear();
        if (reqInfoBuilder_ == null) {
          reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
        } else {
          reqInfoBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000001);
        startTxId_ = 0L;
        bitField0_ = (bitField0_ & ~0x00000002);
        endTxId_ = 0L;
        bitField0_ = (bitField0_ & ~0x00000004);
        return this;
      }

      public Builder clone() {
        return create().mergeFrom(buildPartial());
      }

      public com.google.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_FinalizeLogSegmentRequestProto_descriptor;
      }

      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto.getDefaultInstance();
      }

      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto build() {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

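      // Copies the builder's fields into a new message, translating the
      // builder's has-bits into the message's bitField0_.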
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto buildPartial() {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto(this);
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
          to_bitField0_ |= 0x00000001;
        }
        if (reqInfoBuilder_ == null) {
          result.reqInfo_ = reqInfo_;
        } else {
          result.reqInfo_ = reqInfoBuilder_.build();
        }
        if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
          to_bitField0_ |= 0x00000002;
        }
        result.startTxId_ = startTxId_;
        if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
          to_bitField0_ |= 0x00000004;
        }
        result.endTxId_ = endTxId_;
        result.bitField0_ = to_bitField0_;
        onBuilt();
        return result;
      }

      public Builder mergeFrom(com.google.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto) {
          return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto other) {
        if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto.getDefaultInstance()) return this;
        if (other.hasReqInfo()) {
          mergeReqInfo(other.getReqInfo());
        }
        if (other.hasStartTxId()) {
          setStartTxId(other.getStartTxId());
        }
        if (other.hasEndTxId()) {
          setEndTxId(other.getEndTxId());
        }
        this.mergeUnknownFields(other.getUnknownFields());
        return this;
      }

      public final boolean isInitialized() {
        if (!hasReqInfo()) {
          return false;
        }
        if (!hasStartTxId()) {
          return false;
        }
        if (!hasEndTxId()) {
          return false;
        }
        if (!getReqInfo().isInitialized()) {
          return false;
        }
        return true;
      }

      public Builder mergeFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto parsedMessage = null;
        try {
          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
          parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto) e.getUnfinishedMessage();
          throw e;
        } finally {
          if (parsedMessage != null) {
            mergeFrom(parsedMessage);
          }
        }
        return this;
      }
      private int bitField0_;

      // required .hadoop.hdfs.RequestInfoProto reqInfo = 1;
      private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
      private com.google.protobuf.SingleFieldBuilder<
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder> reqInfoBuilder_;
      /**
       * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
       */
      public boolean hasReqInfo() {
        return ((bitField0_ & 0x00000001) == 0x00000001);
      }
      /**
       * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
       */
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo() {
        if (reqInfoBuilder_ == null) {
          return reqInfo_;
        } else {
          return reqInfoBuilder_.getMessage();
        }
      }
      /**
       * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
       */
      public Builder setReqInfo(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto value) {
        if (reqInfoBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          reqInfo_ = value;
          onChanged();
        } else {
          reqInfoBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00000001;
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
       */
      public Builder setReqInfo(
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder builderForValue) {
        if (reqInfoBuilder_ == null) {
          reqInfo_ = builderForValue.build();
          onChanged();
        } else {
          reqInfoBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000001;
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
       */
      public Builder mergeReqInfo(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto value) {
        if (reqInfoBuilder_ == null) {
          if (((bitField0_ & 0x00000001) == 0x00000001) &&
              reqInfo_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance()) {
            reqInfo_ =
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.newBuilder(reqInfo_).mergeFrom(value).buildPartial();
          } else {
            reqInfo_ = value;
          }
          onChanged();
        } else {
          reqInfoBuilder_.mergeFrom(value);
        }
        bitField0_ |= 0x00000001;
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
       */
      public Builder clearReqInfo() {
        if (reqInfoBuilder_ == null) {
          reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
          onChanged();
        } else {
          reqInfoBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000001);
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
       */
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder getReqInfoBuilder() {
        bitField0_ |= 0x00000001;
        onChanged();
        return getReqInfoFieldBuilder().getBuilder();
      }
      /**
       * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
       */
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder() {
        if (reqInfoBuilder_ != null) {
          return reqInfoBuilder_.getMessageOrBuilder();
        } else {
          return reqInfo_;
        }
      }
      /**
       * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
       */
      private com.google.protobuf.SingleFieldBuilder<
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder>
          getReqInfoFieldBuilder() {
        if (reqInfoBuilder_ == null) {
          reqInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder<
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder>(
                  reqInfo_,
                  getParentForChildren(),
                  isClean());
          reqInfo_ = null;
        }
        return reqInfoBuilder_;
      }

      // required uint64 startTxId = 2;
      private long startTxId_;
      /**
       * <code>required uint64 startTxId = 2;</code>
       */
      public boolean hasStartTxId() {
        return ((bitField0_ & 0x00000002) == 0x00000002);
      }
      /**
       * <code>required uint64 startTxId = 2;</code>
       */
      public long getStartTxId() {
        return startTxId_;
      }
      /**
       * <code>required uint64 startTxId = 2;</code>
       */
      public Builder setStartTxId(long value) {
        bitField0_ |= 0x00000002;
        startTxId_ = value;
        onChanged();
        return this;
      }
      /**
       * <code>required uint64 startTxId = 2;</code>
       */
      public Builder clearStartTxId() {
        bitField0_ = (bitField0_ & ~0x00000002);
        startTxId_ = 0L;
        onChanged();
        return this;
      }

      // required uint64 endTxId = 3;
      private long endTxId_;
      /**
       * <code>required uint64 endTxId = 3;</code>
       */
      public boolean hasEndTxId() {
        return ((bitField0_ & 0x00000004) == 0x00000004);
      }
      /**
       * <code>required uint64 endTxId = 3;</code>
       */
      public long getEndTxId() {
        return endTxId_;
      }
      /**
       * <code>required uint64 endTxId = 3;</code>
       */
      public Builder setEndTxId(long value) {
        bitField0_ |= 0x00000004;
        endTxId_ = value;
        onChanged();
        return this;
      }
      /**
       * <code>required uint64 endTxId = 3;</code>
       */
      public Builder clearEndTxId() {
        bitField0_ = (bitField0_ & ~0x00000004);
        endTxId_ = 0L;
        onChanged();
        return this;
      }

      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.FinalizeLogSegmentRequestProto)
    }

    static {
      defaultInstance = new FinalizeLogSegmentRequestProto(true);
      defaultInstance.initFields();
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.FinalizeLogSegmentRequestProto)
  }
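
  // Usage sketch (illustrative, not part of the generated output): finalizing
  // the segment [1, 100] via the generated Builder API. The journal name and
  // numeric values are hypothetical, and the journalId/epoch/ipcSerialNumber
  // setters are assumed from the RequestInfoProto message declared earlier in
  // this file.
  //
  //   FinalizeLogSegmentRequestProto req = FinalizeLogSegmentRequestProto.newBuilder()
  //       .setReqInfo(RequestInfoProto.newBuilder()
  //           .setJournalId(JournalIdProto.newBuilder().setIdentifier("myjournal"))
  //           .setEpoch(1L)
  //           .setIpcSerialNumber(0L))
  //       .setStartTxId(1L)
  //       .setEndTxId(100L)
  //       .build();   // throws if any required field above were left unset
  //   byte[] bytes = req.toByteArray();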

  public interface FinalizeLogSegmentResponseProtoOrBuilder
      extends com.google.protobuf.MessageOrBuilder {
  }
  /**
   * Protobuf type {@code hadoop.hdfs.FinalizeLogSegmentResponseProto}
   */
  public static final class FinalizeLogSegmentResponseProto extends
      com.google.protobuf.GeneratedMessage
      implements FinalizeLogSegmentResponseProtoOrBuilder {
    // Use FinalizeLogSegmentResponseProto.newBuilder() to construct.
    private FinalizeLogSegmentResponseProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
      super(builder);
      this.unknownFields = builder.getUnknownFields();
    }
    private FinalizeLogSegmentResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }

    private static final FinalizeLogSegmentResponseProto defaultInstance;
    public static FinalizeLogSegmentResponseProto getDefaultInstance() {
      return defaultInstance;
    }

    public FinalizeLogSegmentResponseProto getDefaultInstanceForType() {
      return defaultInstance;
    }

    private final com.google.protobuf.UnknownFieldSet unknownFields;
    @java.lang.Override
    public final com.google.protobuf.UnknownFieldSet
        getUnknownFields() {
      return this.unknownFields;
    }
    private FinalizeLogSegmentResponseProto(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      initFields();
      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
          com.google.protobuf.UnknownFieldSet.newBuilder();
      try {
        boolean done = false;
        while (!done) {
          int tag = input.readTag();
          switch (tag) {
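            // This message declares no fields, so everything other than the
            // end-of-stream tag (0) is preserved as unknown fields.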
            case 0:
              done = true;
              break;
            default: {
              if (!parseUnknownField(input, unknownFields,
                                     extensionRegistry, tag)) {
                done = true;
              }
              break;
            }
          }
        }
      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
        throw e.setUnfinishedMessage(this);
      } catch (java.io.IOException e) {
        throw new com.google.protobuf.InvalidProtocolBufferException(
            e.getMessage()).setUnfinishedMessage(this);
      } finally {
        this.unknownFields = unknownFields.build();
        makeExtensionsImmutable();
      }
    }
    public static final com.google.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_FinalizeLogSegmentResponseProto_descriptor;
    }

    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_FinalizeLogSegmentResponseProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto.Builder.class);
    }

    public static com.google.protobuf.Parser<FinalizeLogSegmentResponseProto> PARSER =
        new com.google.protobuf.AbstractParser<FinalizeLogSegmentResponseProto>() {
      public FinalizeLogSegmentResponseProto parsePartialFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws com.google.protobuf.InvalidProtocolBufferException {
        return new FinalizeLogSegmentResponseProto(input, extensionRegistry);
      }
    };

    @java.lang.Override
    public com.google.protobuf.Parser<FinalizeLogSegmentResponseProto> getParserForType() {
      return PARSER;
    }

    private void initFields() {
    }
    private byte memoizedIsInitialized = -1;
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized != -1) return isInitialized == 1;

      memoizedIsInitialized = 1;
      return true;
    }

    public void writeTo(com.google.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      getSerializedSize();
      getUnknownFields().writeTo(output);
    }

    private int memoizedSerializedSize = -1;
    public int getSerializedSize() {
      int size = memoizedSerializedSize;
      if (size != -1) return size;

      size = 0;
      size += getUnknownFields().getSerializedSize();
      memoizedSerializedSize = size;
      return size;
    }

    private static final long serialVersionUID = 0L;
    @java.lang.Override
    protected java.lang.Object writeReplace()
        throws java.io.ObjectStreamException {
      return super.writeReplace();
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto) obj;

      boolean result = true;
      result = result &&
          getUnknownFields().equals(other.getUnknownFields());
      return result;
    }

    private int memoizedHashCode = 0;
    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptorForType().hashCode();
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto parseFrom(
        com.google.protobuf.ByteString data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto parseFrom(
        com.google.protobuf.ByteString data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto parseFrom(byte[] data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto parseFrom(
        byte[] data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto parseFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto parseDelimitedFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto parseFrom(
        com.google.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto parseFrom(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }

    public static Builder newBuilder() { return Builder.create(); }
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto prototype) {
      return newBuilder().mergeFrom(prototype);
    }
    public Builder toBuilder() { return newBuilder(this); }

    @java.lang.Override
    protected Builder newBuilderForType(
        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * Protobuf type {@code hadoop.hdfs.FinalizeLogSegmentResponseProto}
     */
    public static final class Builder extends
        com.google.protobuf.GeneratedMessage.Builder<Builder>
       implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProtoOrBuilder {
      public static final com.google.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_FinalizeLogSegmentResponseProto_descriptor;
      }

      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_FinalizeLogSegmentResponseProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }

      private Builder(
          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      private void maybeForceBuilderInitialization() {
        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
        }
      }
      private static Builder create() {
        return new Builder();
      }

      public Builder clear() {
        super.clear();
        return this;
      }

      public Builder clone() {
        return create().mergeFrom(buildPartial());
      }

      public com.google.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_FinalizeLogSegmentResponseProto_descriptor;
      }

      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto.getDefaultInstance();
      }

      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto build() {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto buildPartial() {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto(this);
        onBuilt();
        return result;
      }

      public Builder mergeFrom(com.google.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto) {
          return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto other) {
        if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto.getDefaultInstance()) return this;
        this.mergeUnknownFields(other.getUnknownFields());
        return this;
      }

      public final boolean isInitialized() {
        return true;
      }

      public Builder mergeFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto parsedMessage = null;
        try {
          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
          parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto) e.getUnfinishedMessage();
          throw e;
        } finally {
          if (parsedMessage != null) {
            mergeFrom(parsedMessage);
          }
        }
        return this;
      }

      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.FinalizeLogSegmentResponseProto)
    }

    static {
      defaultInstance = new FinalizeLogSegmentResponseProto(true);
      defaultInstance.initFields();
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.FinalizeLogSegmentResponseProto)
  }
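
  // Usage sketch (illustrative, not part of the generated output): the
  // response declares no fields and acts as a bare acknowledgment, so parsing
  // succeeds on any well-formed buffer. `bytes` below is a hypothetical
  // serialized response.
  //
  //   FinalizeLogSegmentResponseProto ack =
  //       FinalizeLogSegmentResponseProto.parseFrom(bytes);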

  public interface PurgeLogsRequestProtoOrBuilder
      extends com.google.protobuf.MessageOrBuilder {

    // required .hadoop.hdfs.RequestInfoProto reqInfo = 1;
    /**
     * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
     */
    boolean hasReqInfo();
    /**
     * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
     */
    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo();
    /**
     * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
     */
    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder();

    // required uint64 minTxIdToKeep = 2;
    /**
     * <code>required uint64 minTxIdToKeep = 2;</code>
     */
    boolean hasMinTxIdToKeep();
    /**
     * <code>required uint64 minTxIdToKeep = 2;</code>
     */
    long getMinTxIdToKeep();
  }
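
  // Usage sketch (illustrative, not part of the generated output) for the
  // PurgeLogsRequestProto class below, which carries the minTxIdToKeep
  // threshold for a purgeLogs() call. `reqInfo` is a fully populated
  // RequestInfoProto (see the FinalizeLogSegmentRequestProto sketch above);
  // the transaction id is hypothetical.
  //
  //   PurgeLogsRequestProto purge = PurgeLogsRequestProto.newBuilder()
  //       .setReqInfo(reqInfo)
  //       .setMinTxIdToKeep(5000L)
  //       .build();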
  /**
   * Protobuf type {@code hadoop.hdfs.PurgeLogsRequestProto}
   *
   * <pre>
   **
   * purgeLogs()
   * </pre>
   */
  public static final class PurgeLogsRequestProto extends
      com.google.protobuf.GeneratedMessage
      implements PurgeLogsRequestProtoOrBuilder {
    // Use PurgeLogsRequestProto.newBuilder() to construct.
    private PurgeLogsRequestProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
      super(builder);
      this.unknownFields = builder.getUnknownFields();
    }
    private PurgeLogsRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }

    private static final PurgeLogsRequestProto defaultInstance;
    public static PurgeLogsRequestProto getDefaultInstance() {
      return defaultInstance;
    }

    public PurgeLogsRequestProto getDefaultInstanceForType() {
      return defaultInstance;
    }

    private final com.google.protobuf.UnknownFieldSet unknownFields;
    @java.lang.Override
    public final com.google.protobuf.UnknownFieldSet
        getUnknownFields() {
      return this.unknownFields;
    }
    private PurgeLogsRequestProto(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      initFields();
      int mutable_bitField0_ = 0;
      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
          com.google.protobuf.UnknownFieldSet.newBuilder();
      try {
        boolean done = false;
        while (!done) {
          int tag = input.readTag();
          switch (tag) {
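            // tag 10 = field 1 (reqInfo, length-delimited message);
            // tag 16 = field 2 (minTxIdToKeep, varint); 0 ends the stream.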
7166                case 0:
7167                  done = true;
7168                  break;
7169                default: {
7170                  if (!parseUnknownField(input, unknownFields,
7171                                         extensionRegistry, tag)) {
7172                    done = true;
7173                  }
7174                  break;
7175                }
7176                case 10: {
7177                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder subBuilder = null;
7178                  if (((bitField0_ & 0x00000001) == 0x00000001)) {
7179                    subBuilder = reqInfo_.toBuilder();
7180                  }
7181                  reqInfo_ = input.readMessage(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.PARSER, extensionRegistry);
7182                  if (subBuilder != null) {
7183                    subBuilder.mergeFrom(reqInfo_);
7184                    reqInfo_ = subBuilder.buildPartial();
7185                  }
7186                  bitField0_ |= 0x00000001;
7187                  break;
7188                }
7189                case 16: {
7190                  bitField0_ |= 0x00000002;
7191                  minTxIdToKeep_ = input.readUInt64();
7192                  break;
7193                }
7194              }
7195            }
7196          } catch (com.google.protobuf.InvalidProtocolBufferException e) {
7197            throw e.setUnfinishedMessage(this);
7198          } catch (java.io.IOException e) {
7199            throw new com.google.protobuf.InvalidProtocolBufferException(
7200                e.getMessage()).setUnfinishedMessage(this);
7201          } finally {
7202            this.unknownFields = unknownFields.build();
7203            makeExtensionsImmutable();
7204          }
7205        }
7206        public static final com.google.protobuf.Descriptors.Descriptor
7207            getDescriptor() {
7208          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_PurgeLogsRequestProto_descriptor;
7209        }
7210    
7211        protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
7212            internalGetFieldAccessorTable() {
7213          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_PurgeLogsRequestProto_fieldAccessorTable
7214              .ensureFieldAccessorsInitialized(
7215                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto.Builder.class);
7216        }
7217    
7218        public static com.google.protobuf.Parser<PurgeLogsRequestProto> PARSER =
7219            new com.google.protobuf.AbstractParser<PurgeLogsRequestProto>() {
7220          public PurgeLogsRequestProto parsePartialFrom(
7221              com.google.protobuf.CodedInputStream input,
7222              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
7223              throws com.google.protobuf.InvalidProtocolBufferException {
7224            return new PurgeLogsRequestProto(input, extensionRegistry);
7225          }
7226        };
7227    
7228        @java.lang.Override
7229        public com.google.protobuf.Parser<PurgeLogsRequestProto> getParserForType() {
7230          return PARSER;
7231        }
7232    
7233        private int bitField0_;
7234        // required .hadoop.hdfs.RequestInfoProto reqInfo = 1;
7235        public static final int REQINFO_FIELD_NUMBER = 1;
7236        private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto reqInfo_;
7237        /**
7238         * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
7239         */
7240        public boolean hasReqInfo() {
7241          return ((bitField0_ & 0x00000001) == 0x00000001);
7242        }
7243        /**
7244         * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
7245         */
7246        public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo() {
7247          return reqInfo_;
7248        }
7249        /**
7250         * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
7251         */
7252        public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder() {
7253          return reqInfo_;
7254        }
7255    
7256        // required uint64 minTxIdToKeep = 2;
7257        public static final int MINTXIDTOKEEP_FIELD_NUMBER = 2;
7258        private long minTxIdToKeep_;
7259        /**
7260         * <code>required uint64 minTxIdToKeep = 2;</code>
7261         */
7262        public boolean hasMinTxIdToKeep() {
7263          return ((bitField0_ & 0x00000002) == 0x00000002);
7264        }
7265        /**
7266         * <code>required uint64 minTxIdToKeep = 2;</code>
7267         */
7268        public long getMinTxIdToKeep() {
7269          return minTxIdToKeep_;
7270        }
7271    
7272        private void initFields() {
7273          reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
7274          minTxIdToKeep_ = 0L;
7275        }
7276        private byte memoizedIsInitialized = -1;
7277        public final boolean isInitialized() {
7278          byte isInitialized = memoizedIsInitialized;
7279          if (isInitialized != -1) return isInitialized == 1;
7280    
7281          if (!hasReqInfo()) {
7282            memoizedIsInitialized = 0;
7283            return false;
7284          }
7285          if (!hasMinTxIdToKeep()) {
7286            memoizedIsInitialized = 0;
7287            return false;
7288          }
7289          if (!getReqInfo().isInitialized()) {
7290            memoizedIsInitialized = 0;
7291            return false;
7292          }
7293          memoizedIsInitialized = 1;
7294          return true;
7295        }
7296    
7297        public void writeTo(com.google.protobuf.CodedOutputStream output)
7298                            throws java.io.IOException {
7299          getSerializedSize();
7300          if (((bitField0_ & 0x00000001) == 0x00000001)) {
7301            output.writeMessage(1, reqInfo_);
7302          }
7303          if (((bitField0_ & 0x00000002) == 0x00000002)) {
7304            output.writeUInt64(2, minTxIdToKeep_);
7305          }
7306          getUnknownFields().writeTo(output);
7307        }
7308    
7309        private int memoizedSerializedSize = -1;
7310        public int getSerializedSize() {
7311          int size = memoizedSerializedSize;
7312          if (size != -1) return size;
7313    
7314          size = 0;
7315          if (((bitField0_ & 0x00000001) == 0x00000001)) {
7316            size += com.google.protobuf.CodedOutputStream
7317              .computeMessageSize(1, reqInfo_);
7318          }
7319          if (((bitField0_ & 0x00000002) == 0x00000002)) {
7320            size += com.google.protobuf.CodedOutputStream
7321              .computeUInt64Size(2, minTxIdToKeep_);
7322          }
7323          size += getUnknownFields().getSerializedSize();
7324          memoizedSerializedSize = size;
7325          return size;
7326        }
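    // For reference, the wire layout of this message is two fields: tag byte
    // 0x0A (field 1, wire type 2, length-delimited) carrying the nested
    // RequestInfoProto, then tag byte 0x10 (field 2, wire type 0, varint)
    // carrying minTxIdToKeep. The size computed above is memoized, so
    // writeTo() and repeated getSerializedSize() calls pay for it only once.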
7327    
7328        private static final long serialVersionUID = 0L;
7329        @java.lang.Override
7330        protected java.lang.Object writeReplace()
7331            throws java.io.ObjectStreamException {
7332          return super.writeReplace();
7333        }
7334    
7335        @java.lang.Override
7336        public boolean equals(final java.lang.Object obj) {
7337          if (obj == this) {
7338            return true;
7339          }
7340          if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto)) {
7341            return super.equals(obj);
7342          }
7343          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto) obj;
7344    
7345          boolean result = true;
7346          result = result && (hasReqInfo() == other.hasReqInfo());
7347          if (hasReqInfo()) {
7348            result = result && getReqInfo()
7349                .equals(other.getReqInfo());
7350          }
7351          result = result && (hasMinTxIdToKeep() == other.hasMinTxIdToKeep());
7352          if (hasMinTxIdToKeep()) {
7353            result = result && (getMinTxIdToKeep()
7354                == other.getMinTxIdToKeep());
7355          }
7356          result = result &&
7357              getUnknownFields().equals(other.getUnknownFields());
7358          return result;
7359        }
7360    
7361        private int memoizedHashCode = 0;
7362        @java.lang.Override
7363        public int hashCode() {
7364          if (memoizedHashCode != 0) {
7365            return memoizedHashCode;
7366          }
7367          int hash = 41;
7368          hash = (19 * hash) + getDescriptorForType().hashCode();
7369          if (hasReqInfo()) {
7370            hash = (37 * hash) + REQINFO_FIELD_NUMBER;
7371            hash = (53 * hash) + getReqInfo().hashCode();
7372          }
7373          if (hasMinTxIdToKeep()) {
7374            hash = (37 * hash) + MINTXIDTOKEEP_FIELD_NUMBER;
7375            hash = (53 * hash) + hashLong(getMinTxIdToKeep());
7376          }
7377          hash = (29 * hash) + getUnknownFields().hashCode();
7378          memoizedHashCode = hash;
7379          return hash;
7380        }
7381    
7382        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto parseFrom(
7383            com.google.protobuf.ByteString data)
7384            throws com.google.protobuf.InvalidProtocolBufferException {
7385          return PARSER.parseFrom(data);
7386        }
7387        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto parseFrom(
7388            com.google.protobuf.ByteString data,
7389            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
7390            throws com.google.protobuf.InvalidProtocolBufferException {
7391          return PARSER.parseFrom(data, extensionRegistry);
7392        }
7393        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto parseFrom(byte[] data)
7394            throws com.google.protobuf.InvalidProtocolBufferException {
7395          return PARSER.parseFrom(data);
7396        }
7397        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto parseFrom(
7398            byte[] data,
7399            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
7400            throws com.google.protobuf.InvalidProtocolBufferException {
7401          return PARSER.parseFrom(data, extensionRegistry);
7402        }
7403        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto parseFrom(java.io.InputStream input)
7404            throws java.io.IOException {
7405          return PARSER.parseFrom(input);
7406        }
7407        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto parseFrom(
7408            java.io.InputStream input,
7409            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
7410            throws java.io.IOException {
7411          return PARSER.parseFrom(input, extensionRegistry);
7412        }
7413        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto parseDelimitedFrom(java.io.InputStream input)
7414            throws java.io.IOException {
7415          return PARSER.parseDelimitedFrom(input);
7416        }
7417        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto parseDelimitedFrom(
7418            java.io.InputStream input,
7419            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
7420            throws java.io.IOException {
7421          return PARSER.parseDelimitedFrom(input, extensionRegistry);
7422        }
7423        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto parseFrom(
7424            com.google.protobuf.CodedInputStream input)
7425            throws java.io.IOException {
7426          return PARSER.parseFrom(input);
7427        }
7428        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto parseFrom(
7429            com.google.protobuf.CodedInputStream input,
7430            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
7431            throws java.io.IOException {
7432          return PARSER.parseFrom(input, extensionRegistry);
7433        }
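    // Stream round-trip sketch; variable names are illustrative. The
    // writeDelimitedTo() method inherited from AbstractMessageLite prefixes
    // the payload with its length so parseDelimitedFrom() can find its end:
    //
    //   java.io.ByteArrayOutputStream out = new java.io.ByteArrayOutputStream();
    //   request.writeDelimitedTo(out);
    //   PurgeLogsRequestProto parsed =
    //       PurgeLogsRequestProto.parseDelimitedFrom(
    //           new java.io.ByteArrayInputStream(out.toByteArray()));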
7434    
7435        public static Builder newBuilder() { return Builder.create(); }
7436        public Builder newBuilderForType() { return newBuilder(); }
7437        public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto prototype) {
7438          return newBuilder().mergeFrom(prototype);
7439        }
7440        public Builder toBuilder() { return newBuilder(this); }
7441    
7442        @java.lang.Override
7443        protected Builder newBuilderForType(
7444            com.google.protobuf.GeneratedMessage.BuilderParent parent) {
7445          Builder builder = new Builder(parent);
7446          return builder;
7447        }
7448        /**
7449         * Protobuf type {@code hadoop.hdfs.PurgeLogsRequestProto}
7450         *
7451         * <pre>
7452         *
7453         * purgeLogs()
7454         * </pre>
7455         */
7456        public static final class Builder extends
7457            com.google.protobuf.GeneratedMessage.Builder<Builder>
7458           implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProtoOrBuilder {
7459          public static final com.google.protobuf.Descriptors.Descriptor
7460              getDescriptor() {
7461            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_PurgeLogsRequestProto_descriptor;
7462          }
7463    
7464          protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
7465              internalGetFieldAccessorTable() {
7466            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_PurgeLogsRequestProto_fieldAccessorTable
7467                .ensureFieldAccessorsInitialized(
7468                    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto.Builder.class);
7469          }
7470    
7471          // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto.newBuilder()
7472          private Builder() {
7473            maybeForceBuilderInitialization();
7474          }
7475    
7476          private Builder(
7477              com.google.protobuf.GeneratedMessage.BuilderParent parent) {
7478            super(parent);
7479            maybeForceBuilderInitialization();
7480          }
7481          private void maybeForceBuilderInitialization() {
7482            if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
7483              getReqInfoFieldBuilder();
7484            }
7485          }
7486          private static Builder create() {
7487            return new Builder();
7488          }
7489    
7490          public Builder clear() {
7491            super.clear();
7492            if (reqInfoBuilder_ == null) {
7493              reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
7494            } else {
7495              reqInfoBuilder_.clear();
7496            }
7497            bitField0_ = (bitField0_ & ~0x00000001);
7498            minTxIdToKeep_ = 0L;
7499            bitField0_ = (bitField0_ & ~0x00000002);
7500            return this;
7501          }
7502    
7503          public Builder clone() {
7504            return create().mergeFrom(buildPartial());
7505          }
7506    
7507          public com.google.protobuf.Descriptors.Descriptor
7508              getDescriptorForType() {
7509            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_PurgeLogsRequestProto_descriptor;
7510          }
7511    
7512          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto getDefaultInstanceForType() {
7513            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto.getDefaultInstance();
7514          }
7515    
7516          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto build() {
7517            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto result = buildPartial();
7518            if (!result.isInitialized()) {
7519              throw newUninitializedMessageException(result);
7520            }
7521            return result;
7522          }
7523    
7524          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto buildPartial() {
7525            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto(this);
7526            int from_bitField0_ = bitField0_;
7527            int to_bitField0_ = 0;
7528            if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
7529              to_bitField0_ |= 0x00000001;
7530            }
7531            if (reqInfoBuilder_ == null) {
7532              result.reqInfo_ = reqInfo_;
7533            } else {
7534              result.reqInfo_ = reqInfoBuilder_.build();
7535            }
7536            if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
7537              to_bitField0_ |= 0x00000002;
7538            }
7539            result.minTxIdToKeep_ = minTxIdToKeep_;
7540            result.bitField0_ = to_bitField0_;
7541            onBuilt();
7542            return result;
7543          }
7544    
7545          public Builder mergeFrom(com.google.protobuf.Message other) {
7546            if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto) {
7547              return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto)other);
7548            } else {
7549              super.mergeFrom(other);
7550              return this;
7551            }
7552          }
7553    
7554          public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto other) {
7555            if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto.getDefaultInstance()) return this;
7556            if (other.hasReqInfo()) {
7557              mergeReqInfo(other.getReqInfo());
7558            }
7559            if (other.hasMinTxIdToKeep()) {
7560              setMinTxIdToKeep(other.getMinTxIdToKeep());
7561            }
7562            this.mergeUnknownFields(other.getUnknownFields());
7563            return this;
7564          }
7565    
7566          public final boolean isInitialized() {
7567            if (!hasReqInfo()) {
7568              
7569              return false;
7570            }
7571            if (!hasMinTxIdToKeep()) {
7572              
7573              return false;
7574            }
7575            if (!getReqInfo().isInitialized()) {
7576              
7577              return false;
7578            }
7579            return true;
7580          }
7581    
7582          public Builder mergeFrom(
7583              com.google.protobuf.CodedInputStream input,
7584              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
7585              throws java.io.IOException {
7586            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto parsedMessage = null;
7587            try {
7588              parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
7589            } catch (com.google.protobuf.InvalidProtocolBufferException e) {
7590              parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto) e.getUnfinishedMessage();
7591              throw e;
7592            } finally {
7593              if (parsedMessage != null) {
7594                mergeFrom(parsedMessage);
7595              }
7596            }
7597            return this;
7598          }
7599          private int bitField0_;
7600    
7601          // required .hadoop.hdfs.RequestInfoProto reqInfo = 1;
7602          private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
7603          private com.google.protobuf.SingleFieldBuilder<
7604              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder> reqInfoBuilder_;
7605          /**
7606           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
7607           */
7608          public boolean hasReqInfo() {
7609            return ((bitField0_ & 0x00000001) == 0x00000001);
7610          }
7611          /**
7612           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
7613           */
7614          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo() {
7615            if (reqInfoBuilder_ == null) {
7616              return reqInfo_;
7617            } else {
7618              return reqInfoBuilder_.getMessage();
7619            }
7620          }
7621          /**
7622           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
7623           */
7624          public Builder setReqInfo(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto value) {
7625            if (reqInfoBuilder_ == null) {
7626              if (value == null) {
7627                throw new NullPointerException();
7628              }
7629              reqInfo_ = value;
7630              onChanged();
7631            } else {
7632              reqInfoBuilder_.setMessage(value);
7633            }
7634            bitField0_ |= 0x00000001;
7635            return this;
7636          }
7637          /**
7638           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
7639           */
7640          public Builder setReqInfo(
7641              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder builderForValue) {
7642            if (reqInfoBuilder_ == null) {
7643              reqInfo_ = builderForValue.build();
7644              onChanged();
7645            } else {
7646              reqInfoBuilder_.setMessage(builderForValue.build());
7647            }
7648            bitField0_ |= 0x00000001;
7649            return this;
7650          }
7651          /**
7652           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
7653           */
7654          public Builder mergeReqInfo(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto value) {
7655            if (reqInfoBuilder_ == null) {
7656              if (((bitField0_ & 0x00000001) == 0x00000001) &&
7657                  reqInfo_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance()) {
7658                reqInfo_ =
7659                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.newBuilder(reqInfo_).mergeFrom(value).buildPartial();
7660              } else {
7661                reqInfo_ = value;
7662              }
7663              onChanged();
7664            } else {
7665              reqInfoBuilder_.mergeFrom(value);
7666            }
7667            bitField0_ |= 0x00000001;
7668            return this;
7669          }
7670          /**
7671           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
7672           */
7673          public Builder clearReqInfo() {
7674            if (reqInfoBuilder_ == null) {
7675              reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
7676              onChanged();
7677            } else {
7678              reqInfoBuilder_.clear();
7679            }
7680            bitField0_ = (bitField0_ & ~0x00000001);
7681            return this;
7682          }
7683          /**
7684           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
7685           */
7686          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder getReqInfoBuilder() {
7687            bitField0_ |= 0x00000001;
7688            onChanged();
7689            return getReqInfoFieldBuilder().getBuilder();
7690          }
7691          /**
7692           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
7693           */
7694          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder() {
7695            if (reqInfoBuilder_ != null) {
7696              return reqInfoBuilder_.getMessageOrBuilder();
7697            } else {
7698              return reqInfo_;
7699            }
7700          }
7701          /**
7702           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
7703           */
7704          private com.google.protobuf.SingleFieldBuilder<
7705              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder> 
7706              getReqInfoFieldBuilder() {
7707            if (reqInfoBuilder_ == null) {
7708              reqInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder<
7709                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder>(
7710                      reqInfo_,
7711                      getParentForChildren(),
7712                      isClean());
7713              reqInfo_ = null;
7714            }
7715            return reqInfoBuilder_;
7716          }
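      // The SingleFieldBuilder above is created lazily on first access; once
      // it exists, the plain reqInfo_ reference is nulled and all further
      // reads and writes of the field go through the builder, which keeps
      // parent/child change notifications consistent.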
7717    
7718          // required uint64 minTxIdToKeep = 2;
7719          private long minTxIdToKeep_;
7720          /**
7721           * <code>required uint64 minTxIdToKeep = 2;</code>
7722           */
7723          public boolean hasMinTxIdToKeep() {
7724            return ((bitField0_ & 0x00000002) == 0x00000002);
7725          }
7726          /**
7727           * <code>required uint64 minTxIdToKeep = 2;</code>
7728           */
7729          public long getMinTxIdToKeep() {
7730            return minTxIdToKeep_;
7731          }
7732          /**
7733           * <code>required uint64 minTxIdToKeep = 2;</code>
7734           */
7735          public Builder setMinTxIdToKeep(long value) {
7736            bitField0_ |= 0x00000002;
7737            minTxIdToKeep_ = value;
7738            onChanged();
7739            return this;
7740          }
7741          /**
7742           * <code>required uint64 minTxIdToKeep = 2;</code>
7743           */
7744          public Builder clearMinTxIdToKeep() {
7745            bitField0_ = (bitField0_ & ~0x00000002);
7746            minTxIdToKeep_ = 0L;
7747            onChanged();
7748            return this;
7749          }
7750    
7751          // @@protoc_insertion_point(builder_scope:hadoop.hdfs.PurgeLogsRequestProto)
7752        }
7753    
7754        static {
7755          defaultInstance = new PurgeLogsRequestProto(true);
7756          defaultInstance.initFields();
7757        }
7758    
7759        // @@protoc_insertion_point(class_scope:hadoop.hdfs.PurgeLogsRequestProto)
7760      }
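  // Construction sketch for the message above; "reqInfo" stands in for a
  // fully-populated RequestInfoProto and is not declared in this file:
  //
  //   PurgeLogsRequestProto request = PurgeLogsRequestProto.newBuilder()
  //       .setReqInfo(reqInfo)
  //       .setMinTxIdToKeep(12345L)  // transactions below this id are purged
  //       .build();                  // throws if a required field is unset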
7761    
7762      public interface PurgeLogsResponseProtoOrBuilder
7763          extends com.google.protobuf.MessageOrBuilder {
7764      }
7765      /**
7766       * Protobuf type {@code hadoop.hdfs.PurgeLogsResponseProto}
7767       */
7768      public static final class PurgeLogsResponseProto extends
7769          com.google.protobuf.GeneratedMessage
7770          implements PurgeLogsResponseProtoOrBuilder {
7771        // Use PurgeLogsResponseProto.newBuilder() to construct.
7772        private PurgeLogsResponseProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
7773          super(builder);
7774          this.unknownFields = builder.getUnknownFields();
7775        }
7776        private PurgeLogsResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
7777    
7778        private static final PurgeLogsResponseProto defaultInstance;
7779        public static PurgeLogsResponseProto getDefaultInstance() {
7780          return defaultInstance;
7781        }
7782    
7783        public PurgeLogsResponseProto getDefaultInstanceForType() {
7784          return defaultInstance;
7785        }
7786    
7787        private final com.google.protobuf.UnknownFieldSet unknownFields;
7788        @java.lang.Override
7789        public final com.google.protobuf.UnknownFieldSet
7790            getUnknownFields() {
7791          return this.unknownFields;
7792        }
7793        private PurgeLogsResponseProto(
7794            com.google.protobuf.CodedInputStream input,
7795            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
7796            throws com.google.protobuf.InvalidProtocolBufferException {
7797          initFields();
7798          com.google.protobuf.UnknownFieldSet.Builder unknownFields =
7799              com.google.protobuf.UnknownFieldSet.newBuilder();
7800          try {
7801            boolean done = false;
7802            while (!done) {
7803              int tag = input.readTag();
7804              switch (tag) {
7805                case 0:
7806                  done = true;
7807                  break;
7808                default: {
7809                  if (!parseUnknownField(input, unknownFields,
7810                                         extensionRegistry, tag)) {
7811                    done = true;
7812                  }
7813                  break;
7814                }
7815              }
7816            }
7817          } catch (com.google.protobuf.InvalidProtocolBufferException e) {
7818            throw e.setUnfinishedMessage(this);
7819          } catch (java.io.IOException e) {
7820            throw new com.google.protobuf.InvalidProtocolBufferException(
7821                e.getMessage()).setUnfinishedMessage(this);
7822          } finally {
7823            this.unknownFields = unknownFields.build();
7824            makeExtensionsImmutable();
7825          }
7826        }
7827        public static final com.google.protobuf.Descriptors.Descriptor
7828            getDescriptor() {
7829          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_PurgeLogsResponseProto_descriptor;
7830        }
7831    
7832        protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
7833            internalGetFieldAccessorTable() {
7834          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_PurgeLogsResponseProto_fieldAccessorTable
7835              .ensureFieldAccessorsInitialized(
7836                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto.Builder.class);
7837        }
7838    
7839        public static com.google.protobuf.Parser<PurgeLogsResponseProto> PARSER =
7840            new com.google.protobuf.AbstractParser<PurgeLogsResponseProto>() {
7841          public PurgeLogsResponseProto parsePartialFrom(
7842              com.google.protobuf.CodedInputStream input,
7843              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
7844              throws com.google.protobuf.InvalidProtocolBufferException {
7845            return new PurgeLogsResponseProto(input, extensionRegistry);
7846          }
7847        };
7848    
7849        @java.lang.Override
7850        public com.google.protobuf.Parser<PurgeLogsResponseProto> getParserForType() {
7851          return PARSER;
7852        }
7853    
7854        private void initFields() {
7855        }
7856        private byte memoizedIsInitialized = -1;
7857        public final boolean isInitialized() {
7858          byte isInitialized = memoizedIsInitialized;
7859          if (isInitialized != -1) return isInitialized == 1;
7860    
7861          memoizedIsInitialized = 1;
7862          return true;
7863        }
7864    
7865        public void writeTo(com.google.protobuf.CodedOutputStream output)
7866                            throws java.io.IOException {
7867          getSerializedSize();
7868          getUnknownFields().writeTo(output);
7869        }
7870    
7871        private int memoizedSerializedSize = -1;
7872        public int getSerializedSize() {
7873          int size = memoizedSerializedSize;
7874          if (size != -1) return size;
7875    
7876          size = 0;
7877          size += getUnknownFields().getSerializedSize();
7878          memoizedSerializedSize = size;
7879          return size;
7880        }
7881    
7882        private static final long serialVersionUID = 0L;
7883        @java.lang.Override
7884        protected java.lang.Object writeReplace()
7885            throws java.io.ObjectStreamException {
7886          return super.writeReplace();
7887        }
7888    
7889        @java.lang.Override
7890        public boolean equals(final java.lang.Object obj) {
7891          if (obj == this) {
7892            return true;
7893          }
7894          if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto)) {
7895            return super.equals(obj);
7896          }
7897          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto) obj;
7898    
7899          boolean result = true;
7900          result = result &&
7901              getUnknownFields().equals(other.getUnknownFields());
7902          return result;
7903        }
7904    
7905        private int memoizedHashCode = 0;
7906        @java.lang.Override
7907        public int hashCode() {
7908          if (memoizedHashCode != 0) {
7909            return memoizedHashCode;
7910          }
7911          int hash = 41;
7912          hash = (19 * hash) + getDescriptorForType().hashCode();
7913          hash = (29 * hash) + getUnknownFields().hashCode();
7914          memoizedHashCode = hash;
7915          return hash;
7916        }
7917    
7918        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto parseFrom(
7919            com.google.protobuf.ByteString data)
7920            throws com.google.protobuf.InvalidProtocolBufferException {
7921          return PARSER.parseFrom(data);
7922        }
7923        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto parseFrom(
7924            com.google.protobuf.ByteString data,
7925            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
7926            throws com.google.protobuf.InvalidProtocolBufferException {
7927          return PARSER.parseFrom(data, extensionRegistry);
7928        }
7929        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto parseFrom(byte[] data)
7930            throws com.google.protobuf.InvalidProtocolBufferException {
7931          return PARSER.parseFrom(data);
7932        }
7933        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto parseFrom(
7934            byte[] data,
7935            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
7936            throws com.google.protobuf.InvalidProtocolBufferException {
7937          return PARSER.parseFrom(data, extensionRegistry);
7938        }
7939        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto parseFrom(java.io.InputStream input)
7940            throws java.io.IOException {
7941          return PARSER.parseFrom(input);
7942        }
7943        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto parseFrom(
7944            java.io.InputStream input,
7945            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
7946            throws java.io.IOException {
7947          return PARSER.parseFrom(input, extensionRegistry);
7948        }
7949        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto parseDelimitedFrom(java.io.InputStream input)
7950            throws java.io.IOException {
7951          return PARSER.parseDelimitedFrom(input);
7952        }
7953        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto parseDelimitedFrom(
7954            java.io.InputStream input,
7955            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
7956            throws java.io.IOException {
7957          return PARSER.parseDelimitedFrom(input, extensionRegistry);
7958        }
7959        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto parseFrom(
7960            com.google.protobuf.CodedInputStream input)
7961            throws java.io.IOException {
7962          return PARSER.parseFrom(input);
7963        }
7964        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto parseFrom(
7965            com.google.protobuf.CodedInputStream input,
7966            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
7967            throws java.io.IOException {
7968          return PARSER.parseFrom(input, extensionRegistry);
7969        }
7970    
7971        public static Builder newBuilder() { return Builder.create(); }
7972        public Builder newBuilderForType() { return newBuilder(); }
7973        public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto prototype) {
7974          return newBuilder().mergeFrom(prototype);
7975        }
7976        public Builder toBuilder() { return newBuilder(this); }
7977    
7978        @java.lang.Override
7979        protected Builder newBuilderForType(
7980            com.google.protobuf.GeneratedMessage.BuilderParent parent) {
7981          Builder builder = new Builder(parent);
7982          return builder;
7983        }
7984        /**
7985         * Protobuf type {@code hadoop.hdfs.PurgeLogsResponseProto}
7986         */
7987        public static final class Builder extends
7988            com.google.protobuf.GeneratedMessage.Builder<Builder>
7989           implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProtoOrBuilder {
7990          public static final com.google.protobuf.Descriptors.Descriptor
7991              getDescriptor() {
7992            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_PurgeLogsResponseProto_descriptor;
7993          }
7994    
7995          protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
7996              internalGetFieldAccessorTable() {
7997            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_PurgeLogsResponseProto_fieldAccessorTable
7998                .ensureFieldAccessorsInitialized(
7999                    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto.Builder.class);
8000          }
8001    
8002          // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto.newBuilder()
8003          private Builder() {
8004            maybeForceBuilderInitialization();
8005          }
8006    
8007          private Builder(
8008              com.google.protobuf.GeneratedMessage.BuilderParent parent) {
8009            super(parent);
8010            maybeForceBuilderInitialization();
8011          }
8012          private void maybeForceBuilderInitialization() {
8013            if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
8014            }
8015          }
8016          private static Builder create() {
8017            return new Builder();
8018          }
8019    
8020          public Builder clear() {
8021            super.clear();
8022            return this;
8023          }
8024    
8025          public Builder clone() {
8026            return create().mergeFrom(buildPartial());
8027          }
8028    
8029          public com.google.protobuf.Descriptors.Descriptor
8030              getDescriptorForType() {
8031            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_PurgeLogsResponseProto_descriptor;
8032          }
8033    
8034          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto getDefaultInstanceForType() {
8035            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto.getDefaultInstance();
8036          }
8037    
8038          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto build() {
8039            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto result = buildPartial();
8040            if (!result.isInitialized()) {
8041              throw newUninitializedMessageException(result);
8042            }
8043            return result;
8044          }
8045    
8046          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto buildPartial() {
8047            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto(this);
8048            onBuilt();
8049            return result;
8050          }
8051    
8052          public Builder mergeFrom(com.google.protobuf.Message other) {
8053            if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto) {
8054              return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto)other);
8055            } else {
8056              super.mergeFrom(other);
8057              return this;
8058            }
8059          }
8060    
8061          public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto other) {
8062            if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto.getDefaultInstance()) return this;
8063            this.mergeUnknownFields(other.getUnknownFields());
8064            return this;
8065          }
8066    
8067          public final boolean isInitialized() {
8068            return true;
8069          }
8070    
8071          public Builder mergeFrom(
8072              com.google.protobuf.CodedInputStream input,
8073              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
8074              throws java.io.IOException {
8075            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto parsedMessage = null;
8076            try {
8077              parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
8078            } catch (com.google.protobuf.InvalidProtocolBufferException e) {
8079              parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto) e.getUnfinishedMessage();
8080              throw e;
8081            } finally {
8082              if (parsedMessage != null) {
8083                mergeFrom(parsedMessage);
8084              }
8085            }
8086            return this;
8087          }
8088    
8089          // @@protoc_insertion_point(builder_scope:hadoop.hdfs.PurgeLogsResponseProto)
8090        }
8091    
8092        static {
8093          defaultInstance = new PurgeLogsResponseProto(true);
8094          defaultInstance.initFields();
8095        }
8096    
8097        // @@protoc_insertion_point(class_scope:hadoop.hdfs.PurgeLogsResponseProto)
8098      }
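  // PurgeLogsResponseProto declares no fields, so a responder can reuse the
  // shared default instance instead of building a fresh message:
  //
  //   PurgeLogsResponseProto resp =
  //       PurgeLogsResponseProto.getDefaultInstance();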
8099    
8100      public interface IsFormattedRequestProtoOrBuilder
8101          extends com.google.protobuf.MessageOrBuilder {
8102    
8103        // required .hadoop.hdfs.JournalIdProto jid = 1;
8104        /**
8105         * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
8106         */
8107        boolean hasJid();
8108        /**
8109         * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
8110         */
8111        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid();
8112        /**
8113         * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
8114         */
8115        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder();
8116      }
8117      /**
8118       * Protobuf type {@code hadoop.hdfs.IsFormattedRequestProto}
8119       *
8120       * <pre>
8121       *
8122       * isFormatted()
8123       * </pre>
8124       */
8125      public static final class IsFormattedRequestProto extends
8126          com.google.protobuf.GeneratedMessage
8127          implements IsFormattedRequestProtoOrBuilder {
8128        // Use IsFormattedRequestProto.newBuilder() to construct.
8129        private IsFormattedRequestProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
8130          super(builder);
8131          this.unknownFields = builder.getUnknownFields();
8132        }
8133        private IsFormattedRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
8134    
8135        private static final IsFormattedRequestProto defaultInstance;
8136        public static IsFormattedRequestProto getDefaultInstance() {
8137          return defaultInstance;
8138        }
8139    
8140        public IsFormattedRequestProto getDefaultInstanceForType() {
8141          return defaultInstance;
8142        }
8143    
8144        private final com.google.protobuf.UnknownFieldSet unknownFields;
8145        @java.lang.Override
8146        public final com.google.protobuf.UnknownFieldSet
8147            getUnknownFields() {
8148          return this.unknownFields;
8149        }
8150        private IsFormattedRequestProto(
8151            com.google.protobuf.CodedInputStream input,
8152            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
8153            throws com.google.protobuf.InvalidProtocolBufferException {
8154          initFields();
8155          int mutable_bitField0_ = 0;
8156          com.google.protobuf.UnknownFieldSet.Builder unknownFields =
8157              com.google.protobuf.UnknownFieldSet.newBuilder();
8158          try {
8159            boolean done = false;
8160            while (!done) {
8161              int tag = input.readTag();
8162              switch (tag) {
8163                case 0:
8164                  done = true;
8165                  break;
8166                default: {
8167                  if (!parseUnknownField(input, unknownFields,
8168                                         extensionRegistry, tag)) {
8169                    done = true;
8170                  }
8171                  break;
8172                }
8173                case 10: {
8174                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder subBuilder = null;
8175                  if (((bitField0_ & 0x00000001) == 0x00000001)) {
8176                    subBuilder = jid_.toBuilder();
8177                  }
8178                  jid_ = input.readMessage(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.PARSER, extensionRegistry);
8179                  if (subBuilder != null) {
8180                    subBuilder.mergeFrom(jid_);
8181                    jid_ = subBuilder.buildPartial();
8182                  }
8183                  bitField0_ |= 0x00000001;
8184                  break;
8185                }
8186              }
8187            }
8188          } catch (com.google.protobuf.InvalidProtocolBufferException e) {
8189            throw e.setUnfinishedMessage(this);
8190          } catch (java.io.IOException e) {
8191            throw new com.google.protobuf.InvalidProtocolBufferException(
8192                e.getMessage()).setUnfinishedMessage(this);
8193          } finally {
8194            this.unknownFields = unknownFields.build();
8195            makeExtensionsImmutable();
8196          }
8197        }
8198        public static final com.google.protobuf.Descriptors.Descriptor
8199            getDescriptor() {
8200          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_IsFormattedRequestProto_descriptor;
8201        }
8202    
8203        protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
8204            internalGetFieldAccessorTable() {
8205          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_IsFormattedRequestProto_fieldAccessorTable
8206              .ensureFieldAccessorsInitialized(
8207                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto.Builder.class);
8208        }
8209    
8210        public static com.google.protobuf.Parser<IsFormattedRequestProto> PARSER =
8211            new com.google.protobuf.AbstractParser<IsFormattedRequestProto>() {
8212          public IsFormattedRequestProto parsePartialFrom(
8213              com.google.protobuf.CodedInputStream input,
8214              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
8215              throws com.google.protobuf.InvalidProtocolBufferException {
8216            return new IsFormattedRequestProto(input, extensionRegistry);
8217          }
8218        };
8219    
8220        @java.lang.Override
8221        public com.google.protobuf.Parser<IsFormattedRequestProto> getParserForType() {
8222          return PARSER;
8223        }
8224    
8225        private int bitField0_;
8226        // required .hadoop.hdfs.JournalIdProto jid = 1;
8227        public static final int JID_FIELD_NUMBER = 1;
8228        private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto jid_;
8229        /**
8230         * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
8231         */
8232        public boolean hasJid() {
8233          return ((bitField0_ & 0x00000001) == 0x00000001);
8234        }
8235        /**
8236         * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
8237         */
8238        public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid() {
8239          return jid_;
8240        }
8241        /**
8242         * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
8243         */
8244        public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder() {
8245          return jid_;
8246        }
8247    
8248        private void initFields() {
8249          jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
8250        }
8251        private byte memoizedIsInitialized = -1;
8252        public final boolean isInitialized() {
8253          byte isInitialized = memoizedIsInitialized;
8254          if (isInitialized != -1) return isInitialized == 1;
8255    
8256          if (!hasJid()) {
8257            memoizedIsInitialized = 0;
8258            return false;
8259          }
8260          if (!getJid().isInitialized()) {
8261            memoizedIsInitialized = 0;
8262            return false;
8263          }
8264          memoizedIsInitialized = 1;
8265          return true;
8266        }
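    // isInitialized() enforces the required-field contract: jid must be set
    // and the nested JournalIdProto must itself be initialized (its
    // identifier string is likewise required). A cheap validity probe, with
    // "msg" as an illustrative instance:
    //
    //   if (!msg.isInitialized()) { /* reject: required jid is missing */ }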
8267    
8268        public void writeTo(com.google.protobuf.CodedOutputStream output)
8269                            throws java.io.IOException {
8270          getSerializedSize();
8271          if (((bitField0_ & 0x00000001) == 0x00000001)) {
8272            output.writeMessage(1, jid_);
8273          }
8274          getUnknownFields().writeTo(output);
8275        }
8276    
8277        private int memoizedSerializedSize = -1;
8278        public int getSerializedSize() {
8279          int size = memoizedSerializedSize;
8280          if (size != -1) return size;
8281    
8282          size = 0;
8283          if (((bitField0_ & 0x00000001) == 0x00000001)) {
8284            size += com.google.protobuf.CodedOutputStream
8285              .computeMessageSize(1, jid_);
8286          }
8287          size += getUnknownFields().getSerializedSize();
8288          memoizedSerializedSize = size;
8289          return size;
8290        }
8291    
8292        private static final long serialVersionUID = 0L;
8293        @java.lang.Override
8294        protected java.lang.Object writeReplace()
8295            throws java.io.ObjectStreamException {
8296          return super.writeReplace();
8297        }
8298    
8299        @java.lang.Override
8300        public boolean equals(final java.lang.Object obj) {
8301          if (obj == this) {
8302            return true;
8303          }
8304          if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto)) {
8305            return super.equals(obj);
8306          }
8307          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto) obj;
8308    
8309          boolean result = true;
8310          result = result && (hasJid() == other.hasJid());
8311          if (hasJid()) {
8312            result = result && getJid()
8313                .equals(other.getJid());
8314          }
8315          result = result &&
8316              getUnknownFields().equals(other.getUnknownFields());
8317          return result;
8318        }
8319    
8320        private int memoizedHashCode = 0;
8321        @java.lang.Override
8322        public int hashCode() {
8323          if (memoizedHashCode != 0) {
8324            return memoizedHashCode;
8325          }
8326          int hash = 41;
8327          hash = (19 * hash) + getDescriptorForType().hashCode();
8328          if (hasJid()) {
8329            hash = (37 * hash) + JID_FIELD_NUMBER;
8330            hash = (53 * hash) + getJid().hashCode();
8331          }
8332          hash = (29 * hash) + getUnknownFields().hashCode();
8333          memoizedHashCode = hash;
8334          return hash;
8335        }
8336    
8337        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto parseFrom(
8338            com.google.protobuf.ByteString data)
8339            throws com.google.protobuf.InvalidProtocolBufferException {
8340          return PARSER.parseFrom(data);
8341        }
8342        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto parseFrom(
8343            com.google.protobuf.ByteString data,
8344            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
8345            throws com.google.protobuf.InvalidProtocolBufferException {
8346          return PARSER.parseFrom(data, extensionRegistry);
8347        }
8348        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto parseFrom(byte[] data)
8349            throws com.google.protobuf.InvalidProtocolBufferException {
8350          return PARSER.parseFrom(data);
8351        }
8352        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto parseFrom(
8353            byte[] data,
8354            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
8355            throws com.google.protobuf.InvalidProtocolBufferException {
8356          return PARSER.parseFrom(data, extensionRegistry);
8357        }
8358        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto parseFrom(java.io.InputStream input)
8359            throws java.io.IOException {
8360          return PARSER.parseFrom(input);
8361        }
8362        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto parseFrom(
8363            java.io.InputStream input,
8364            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
8365            throws java.io.IOException {
8366          return PARSER.parseFrom(input, extensionRegistry);
8367        }
8368        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto parseDelimitedFrom(java.io.InputStream input)
8369            throws java.io.IOException {
8370          return PARSER.parseDelimitedFrom(input);
8371        }
8372        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto parseDelimitedFrom(
8373            java.io.InputStream input,
8374            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
8375            throws java.io.IOException {
8376          return PARSER.parseDelimitedFrom(input, extensionRegistry);
8377        }
8378        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto parseFrom(
8379            com.google.protobuf.CodedInputStream input)
8380            throws java.io.IOException {
8381          return PARSER.parseFrom(input);
8382        }
8383        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto parseFrom(
8384            com.google.protobuf.CodedInputStream input,
8385            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
8386            throws java.io.IOException {
8387          return PARSER.parseFrom(input, extensionRegistry);
8388        }
8389    
8390        public static Builder newBuilder() { return Builder.create(); }
8391        public Builder newBuilderForType() { return newBuilder(); }
8392        public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto prototype) {
8393          return newBuilder().mergeFrom(prototype);
8394        }
8395        public Builder toBuilder() { return newBuilder(this); }
8396    
8397        @java.lang.Override
8398        protected Builder newBuilderForType(
8399            com.google.protobuf.GeneratedMessage.BuilderParent parent) {
8400          Builder builder = new Builder(parent);
8401          return builder;
8402        }
8403        /**
8404         * Protobuf type {@code hadoop.hdfs.IsFormattedRequestProto}
8405         *
8406         * <pre>
8407          *
8408         * isFormatted()
8409         * </pre>
8410         */
8411        public static final class Builder extends
8412            com.google.protobuf.GeneratedMessage.Builder<Builder>
8413           implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProtoOrBuilder {
8414          public static final com.google.protobuf.Descriptors.Descriptor
8415              getDescriptor() {
8416            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_IsFormattedRequestProto_descriptor;
8417          }
8418    
8419          protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
8420              internalGetFieldAccessorTable() {
8421            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_IsFormattedRequestProto_fieldAccessorTable
8422                .ensureFieldAccessorsInitialized(
8423                    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto.Builder.class);
8424          }
8425    
8426          // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto.newBuilder()
8427          private Builder() {
8428            maybeForceBuilderInitialization();
8429          }
8430    
8431          private Builder(
8432              com.google.protobuf.GeneratedMessage.BuilderParent parent) {
8433            super(parent);
8434            maybeForceBuilderInitialization();
8435          }
8436          private void maybeForceBuilderInitialization() {
8437            if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
8438              getJidFieldBuilder();
8439            }
8440          }
8441          private static Builder create() {
8442            return new Builder();
8443          }
8444    
8445          public Builder clear() {
8446            super.clear();
8447            if (jidBuilder_ == null) {
8448              jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
8449            } else {
8450              jidBuilder_.clear();
8451            }
8452            bitField0_ = (bitField0_ & ~0x00000001);
8453            return this;
8454          }
8455    
8456          public Builder clone() {
8457            return create().mergeFrom(buildPartial());
8458          }
8459    
8460          public com.google.protobuf.Descriptors.Descriptor
8461              getDescriptorForType() {
8462            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_IsFormattedRequestProto_descriptor;
8463          }
8464    
8465          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto getDefaultInstanceForType() {
8466            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto.getDefaultInstance();
8467          }
8468    
8469          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto build() {
8470            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto result = buildPartial();
8471            if (!result.isInitialized()) {
8472              throw newUninitializedMessageException(result);
8473            }
8474            return result;
8475          }
8476    
8477          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto buildPartial() {
8478            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto(this);
8479            int from_bitField0_ = bitField0_;
8480            int to_bitField0_ = 0;
8481            if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
8482              to_bitField0_ |= 0x00000001;
8483            }
8484            if (jidBuilder_ == null) {
8485              result.jid_ = jid_;
8486            } else {
8487              result.jid_ = jidBuilder_.build();
8488            }
8489            result.bitField0_ = to_bitField0_;
8490            onBuilt();
8491            return result;
8492          }
8493    
8494          public Builder mergeFrom(com.google.protobuf.Message other) {
8495            if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto) {
8496              return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto)other);
8497            } else {
8498              super.mergeFrom(other);
8499              return this;
8500            }
8501          }
8502    
8503          public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto other) {
8504            if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto.getDefaultInstance()) return this;
8505            if (other.hasJid()) {
8506              mergeJid(other.getJid());
8507            }
8508            this.mergeUnknownFields(other.getUnknownFields());
8509            return this;
8510          }
8511    
          public final boolean isInitialized() {
            if (!hasJid()) {
              // required field jid is not set
              return false;
            }
            if (!getJid().isInitialized()) {
              // required field jid is set but not itself initialized
              return false;
            }
            return true;
          }
8523    
8524          public Builder mergeFrom(
8525              com.google.protobuf.CodedInputStream input,
8526              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
8527              throws java.io.IOException {
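            // If parsing fails partway through, the exception carries the
            // partially parsed message; the finally block still merges
            // whatever was read successfully into this builder.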
8528            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto parsedMessage = null;
8529            try {
8530              parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
8531            } catch (com.google.protobuf.InvalidProtocolBufferException e) {
8532              parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto) e.getUnfinishedMessage();
8533              throw e;
8534            } finally {
8535              if (parsedMessage != null) {
8536                mergeFrom(parsedMessage);
8537              }
8538            }
8539            return this;
8540          }
8541          private int bitField0_;
8542    
8543          // required .hadoop.hdfs.JournalIdProto jid = 1;
8544          private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
8545          private com.google.protobuf.SingleFieldBuilder<
8546              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder> jidBuilder_;
8547          /**
8548           * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
8549           */
8550          public boolean hasJid() {
8551            return ((bitField0_ & 0x00000001) == 0x00000001);
8552          }
8553          /**
8554           * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
8555           */
8556          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid() {
8557            if (jidBuilder_ == null) {
8558              return jid_;
8559            } else {
8560              return jidBuilder_.getMessage();
8561            }
8562          }
8563          /**
8564           * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
8565           */
8566          public Builder setJid(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto value) {
8567            if (jidBuilder_ == null) {
8568              if (value == null) {
8569                throw new NullPointerException();
8570              }
8571              jid_ = value;
8572              onChanged();
8573            } else {
8574              jidBuilder_.setMessage(value);
8575            }
8576            bitField0_ |= 0x00000001;
8577            return this;
8578          }
8579          /**
8580           * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
8581           */
8582          public Builder setJid(
8583              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder builderForValue) {
8584            if (jidBuilder_ == null) {
8585              jid_ = builderForValue.build();
8586              onChanged();
8587            } else {
8588              jidBuilder_.setMessage(builderForValue.build());
8589            }
8590            bitField0_ |= 0x00000001;
8591            return this;
8592          }
8593          /**
8594           * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
8595           */
8596          public Builder mergeJid(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto value) {
8597            if (jidBuilder_ == null) {
8598              if (((bitField0_ & 0x00000001) == 0x00000001) &&
8599                  jid_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance()) {
8600                jid_ =
8601                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.newBuilder(jid_).mergeFrom(value).buildPartial();
8602              } else {
8603                jid_ = value;
8604              }
8605              onChanged();
8606            } else {
8607              jidBuilder_.mergeFrom(value);
8608            }
8609            bitField0_ |= 0x00000001;
8610            return this;
8611          }
8612          /**
8613           * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
8614           */
8615          public Builder clearJid() {
8616            if (jidBuilder_ == null) {
8617              jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
8618              onChanged();
8619            } else {
8620              jidBuilder_.clear();
8621            }
8622            bitField0_ = (bitField0_ & ~0x00000001);
8623            return this;
8624          }
8625          /**
8626           * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
8627           */
8628          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder getJidBuilder() {
8629            bitField0_ |= 0x00000001;
8630            onChanged();
8631            return getJidFieldBuilder().getBuilder();
8632          }
8633          /**
8634           * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
8635           */
8636          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder() {
8637            if (jidBuilder_ != null) {
8638              return jidBuilder_.getMessageOrBuilder();
8639            } else {
8640              return jid_;
8641            }
8642          }
8643          /**
8644           * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
8645           */
8646          private com.google.protobuf.SingleFieldBuilder<
8647              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder> 
8648              getJidFieldBuilder() {
8649            if (jidBuilder_ == null) {
8650              jidBuilder_ = new com.google.protobuf.SingleFieldBuilder<
8651                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder>(
8652                      jid_,
8653                      getParentForChildren(),
8654                      isClean());
8655              jid_ = null;
8656            }
8657            return jidBuilder_;
8658          }
8659    
8660          // @@protoc_insertion_point(builder_scope:hadoop.hdfs.IsFormattedRequestProto)
8661        }
8662    
8663        static {
8664          defaultInstance = new IsFormattedRequestProto(true);
8665          defaultInstance.initFields();
8666        }
8667    
8668        // @@protoc_insertion_point(class_scope:hadoop.hdfs.IsFormattedRequestProto)
8669      }
8670    
8671      public interface IsFormattedResponseProtoOrBuilder
8672          extends com.google.protobuf.MessageOrBuilder {
8673    
8674        // required bool isFormatted = 1;
8675        /**
8676         * <code>required bool isFormatted = 1;</code>
8677         */
8678        boolean hasIsFormatted();
8679        /**
8680         * <code>required bool isFormatted = 1;</code>
8681         */
8682        boolean getIsFormatted();
8683      }
8684      /**
8685       * Protobuf type {@code hadoop.hdfs.IsFormattedResponseProto}
8686       */
8687      public static final class IsFormattedResponseProto extends
8688          com.google.protobuf.GeneratedMessage
8689          implements IsFormattedResponseProtoOrBuilder {
8690        // Use IsFormattedResponseProto.newBuilder() to construct.
8691        private IsFormattedResponseProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
8692          super(builder);
8693          this.unknownFields = builder.getUnknownFields();
8694        }
8695        private IsFormattedResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
8696    
8697        private static final IsFormattedResponseProto defaultInstance;
8698        public static IsFormattedResponseProto getDefaultInstance() {
8699          return defaultInstance;
8700        }
8701    
8702        public IsFormattedResponseProto getDefaultInstanceForType() {
8703          return defaultInstance;
8704        }
8705    
8706        private final com.google.protobuf.UnknownFieldSet unknownFields;
8707        @java.lang.Override
8708        public final com.google.protobuf.UnknownFieldSet
8709            getUnknownFields() {
8710          return this.unknownFields;
8711        }
8712        private IsFormattedResponseProto(
8713            com.google.protobuf.CodedInputStream input,
8714            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
8715            throws com.google.protobuf.InvalidProtocolBufferException {
8716          initFields();
8717          int mutable_bitField0_ = 0;
8718          com.google.protobuf.UnknownFieldSet.Builder unknownFields =
8719              com.google.protobuf.UnknownFieldSet.newBuilder();
8720          try {
8721            boolean done = false;
8722            while (!done) {
8723              int tag = input.readTag();
8724              switch (tag) {
8725                case 0:
8726                  done = true;
8727                  break;
8728                default: {
8729                  if (!parseUnknownField(input, unknownFields,
8730                                         extensionRegistry, tag)) {
8731                    done = true;
8732                  }
8733                  break;
8734                }
8735                case 8: {
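                // tag 8 == (field number 1 << 3) | wire type 0 (varint),
                // i.e. the required bool isFormatted.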
8736                  bitField0_ |= 0x00000001;
8737                  isFormatted_ = input.readBool();
8738                  break;
8739                }
8740              }
8741            }
8742          } catch (com.google.protobuf.InvalidProtocolBufferException e) {
8743            throw e.setUnfinishedMessage(this);
8744          } catch (java.io.IOException e) {
8745            throw new com.google.protobuf.InvalidProtocolBufferException(
8746                e.getMessage()).setUnfinishedMessage(this);
8747          } finally {
8748            this.unknownFields = unknownFields.build();
8749            makeExtensionsImmutable();
8750          }
8751        }
8752        public static final com.google.protobuf.Descriptors.Descriptor
8753            getDescriptor() {
8754          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_IsFormattedResponseProto_descriptor;
8755        }
8756    
8757        protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
8758            internalGetFieldAccessorTable() {
8759          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_IsFormattedResponseProto_fieldAccessorTable
8760              .ensureFieldAccessorsInitialized(
8761                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto.Builder.class);
8762        }
8763    
8764        public static com.google.protobuf.Parser<IsFormattedResponseProto> PARSER =
8765            new com.google.protobuf.AbstractParser<IsFormattedResponseProto>() {
8766          public IsFormattedResponseProto parsePartialFrom(
8767              com.google.protobuf.CodedInputStream input,
8768              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
8769              throws com.google.protobuf.InvalidProtocolBufferException {
8770            return new IsFormattedResponseProto(input, extensionRegistry);
8771          }
8772        };
8773    
8774        @java.lang.Override
8775        public com.google.protobuf.Parser<IsFormattedResponseProto> getParserForType() {
8776          return PARSER;
8777        }
8778    
8779        private int bitField0_;
8780        // required bool isFormatted = 1;
8781        public static final int ISFORMATTED_FIELD_NUMBER = 1;
8782        private boolean isFormatted_;
8783        /**
8784         * <code>required bool isFormatted = 1;</code>
8785         */
8786        public boolean hasIsFormatted() {
8787          return ((bitField0_ & 0x00000001) == 0x00000001);
8788        }
8789        /**
8790         * <code>required bool isFormatted = 1;</code>
8791         */
8792        public boolean getIsFormatted() {
8793          return isFormatted_;
8794        }
8795    
8796        private void initFields() {
8797          isFormatted_ = false;
8798        }
        private byte memoizedIsInitialized = -1; // -1: unknown, 0: missing required fields, 1: fully initialized
8800        public final boolean isInitialized() {
8801          byte isInitialized = memoizedIsInitialized;
8802          if (isInitialized != -1) return isInitialized == 1;
8803    
8804          if (!hasIsFormatted()) {
8805            memoizedIsInitialized = 0;
8806            return false;
8807          }
8808          memoizedIsInitialized = 1;
8809          return true;
8810        }
8811    
8812        public void writeTo(com.google.protobuf.CodedOutputStream output)
8813                            throws java.io.IOException {
8814          getSerializedSize();
8815          if (((bitField0_ & 0x00000001) == 0x00000001)) {
8816            output.writeBool(1, isFormatted_);
8817          }
8818          getUnknownFields().writeTo(output);
8819        }
8820    
        private int memoizedSerializedSize = -1; // -1 until computed by getSerializedSize()
8822        public int getSerializedSize() {
8823          int size = memoizedSerializedSize;
8824          if (size != -1) return size;
8825    
8826          size = 0;
8827          if (((bitField0_ & 0x00000001) == 0x00000001)) {
8828            size += com.google.protobuf.CodedOutputStream
8829              .computeBoolSize(1, isFormatted_);
8830          }
8831          size += getUnknownFields().getSerializedSize();
8832          memoizedSerializedSize = size;
8833          return size;
8834        }
8835    
8836        private static final long serialVersionUID = 0L;
8837        @java.lang.Override
8838        protected java.lang.Object writeReplace()
8839            throws java.io.ObjectStreamException {
8840          return super.writeReplace();
8841        }
8842    
8843        @java.lang.Override
8844        public boolean equals(final java.lang.Object obj) {
          if (obj == this) {
            return true;
          }
8848          if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto)) {
8849            return super.equals(obj);
8850          }
8851          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto) obj;
8852    
8853          boolean result = true;
8854          result = result && (hasIsFormatted() == other.hasIsFormatted());
8855          if (hasIsFormatted()) {
8856            result = result && (getIsFormatted()
8857                == other.getIsFormatted());
8858          }
8859          result = result &&
8860              getUnknownFields().equals(other.getUnknownFields());
8861          return result;
8862        }
8863    
8864        private int memoizedHashCode = 0;
8865        @java.lang.Override
8866        public int hashCode() {
8867          if (memoizedHashCode != 0) {
8868            return memoizedHashCode;
8869          }
8870          int hash = 41;
8871          hash = (19 * hash) + getDescriptorForType().hashCode();
8872          if (hasIsFormatted()) {
8873            hash = (37 * hash) + ISFORMATTED_FIELD_NUMBER;
8874            hash = (53 * hash) + hashBoolean(getIsFormatted());
8875          }
8876          hash = (29 * hash) + getUnknownFields().hashCode();
8877          memoizedHashCode = hash;
8878          return hash;
8879        }
8880    
8881        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto parseFrom(
8882            com.google.protobuf.ByteString data)
8883            throws com.google.protobuf.InvalidProtocolBufferException {
8884          return PARSER.parseFrom(data);
8885        }
8886        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto parseFrom(
8887            com.google.protobuf.ByteString data,
8888            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
8889            throws com.google.protobuf.InvalidProtocolBufferException {
8890          return PARSER.parseFrom(data, extensionRegistry);
8891        }
8892        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto parseFrom(byte[] data)
8893            throws com.google.protobuf.InvalidProtocolBufferException {
8894          return PARSER.parseFrom(data);
8895        }
8896        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto parseFrom(
8897            byte[] data,
8898            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
8899            throws com.google.protobuf.InvalidProtocolBufferException {
8900          return PARSER.parseFrom(data, extensionRegistry);
8901        }
8902        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto parseFrom(java.io.InputStream input)
8903            throws java.io.IOException {
8904          return PARSER.parseFrom(input);
8905        }
8906        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto parseFrom(
8907            java.io.InputStream input,
8908            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
8909            throws java.io.IOException {
8910          return PARSER.parseFrom(input, extensionRegistry);
8911        }
8912        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto parseDelimitedFrom(java.io.InputStream input)
8913            throws java.io.IOException {
8914          return PARSER.parseDelimitedFrom(input);
8915        }
8916        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto parseDelimitedFrom(
8917            java.io.InputStream input,
8918            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
8919            throws java.io.IOException {
8920          return PARSER.parseDelimitedFrom(input, extensionRegistry);
8921        }
8922        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto parseFrom(
8923            com.google.protobuf.CodedInputStream input)
8924            throws java.io.IOException {
8925          return PARSER.parseFrom(input);
8926        }
8927        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto parseFrom(
8928            com.google.protobuf.CodedInputStream input,
8929            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
8930            throws java.io.IOException {
8931          return PARSER.parseFrom(input, extensionRegistry);
8932        }
8933    
8934        public static Builder newBuilder() { return Builder.create(); }
8935        public Builder newBuilderForType() { return newBuilder(); }
8936        public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto prototype) {
8937          return newBuilder().mergeFrom(prototype);
8938        }
8939        public Builder toBuilder() { return newBuilder(this); }
8940    
8941        @java.lang.Override
8942        protected Builder newBuilderForType(
8943            com.google.protobuf.GeneratedMessage.BuilderParent parent) {
8944          Builder builder = new Builder(parent);
8945          return builder;
8946        }
8947        /**
8948         * Protobuf type {@code hadoop.hdfs.IsFormattedResponseProto}
8949         */
8950        public static final class Builder extends
8951            com.google.protobuf.GeneratedMessage.Builder<Builder>
8952           implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProtoOrBuilder {
8953          public static final com.google.protobuf.Descriptors.Descriptor
8954              getDescriptor() {
8955            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_IsFormattedResponseProto_descriptor;
8956          }
8957    
8958          protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
8959              internalGetFieldAccessorTable() {
8960            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_IsFormattedResponseProto_fieldAccessorTable
8961                .ensureFieldAccessorsInitialized(
8962                    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto.Builder.class);
8963          }
8964    
8965          // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto.newBuilder()
8966          private Builder() {
8967            maybeForceBuilderInitialization();
8968          }
8969    
8970          private Builder(
8971              com.google.protobuf.GeneratedMessage.BuilderParent parent) {
8972            super(parent);
8973            maybeForceBuilderInitialization();
8974          }
8975          private void maybeForceBuilderInitialization() {
8976            if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
8977            }
8978          }
8979          private static Builder create() {
8980            return new Builder();
8981          }
8982    
8983          public Builder clear() {
8984            super.clear();
8985            isFormatted_ = false;
8986            bitField0_ = (bitField0_ & ~0x00000001);
8987            return this;
8988          }
8989    
8990          public Builder clone() {
8991            return create().mergeFrom(buildPartial());
8992          }
8993    
8994          public com.google.protobuf.Descriptors.Descriptor
8995              getDescriptorForType() {
8996            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_IsFormattedResponseProto_descriptor;
8997          }
8998    
8999          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto getDefaultInstanceForType() {
9000            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto.getDefaultInstance();
9001          }
9002    
9003          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto build() {
9004            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto result = buildPartial();
9005            if (!result.isInitialized()) {
9006              throw newUninitializedMessageException(result);
9007            }
9008            return result;
9009          }
9010    
9011          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto buildPartial() {
9012            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto(this);
9013            int from_bitField0_ = bitField0_;
9014            int to_bitField0_ = 0;
9015            if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
9016              to_bitField0_ |= 0x00000001;
9017            }
9018            result.isFormatted_ = isFormatted_;
9019            result.bitField0_ = to_bitField0_;
9020            onBuilt();
9021            return result;
9022          }
9023    
9024          public Builder mergeFrom(com.google.protobuf.Message other) {
9025            if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto) {
9026              return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto)other);
9027            } else {
9028              super.mergeFrom(other);
9029              return this;
9030            }
9031          }
9032    
9033          public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto other) {
9034            if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto.getDefaultInstance()) return this;
9035            if (other.hasIsFormatted()) {
9036              setIsFormatted(other.getIsFormatted());
9037            }
9038            this.mergeUnknownFields(other.getUnknownFields());
9039            return this;
9040          }
9041    
          public final boolean isInitialized() {
            if (!hasIsFormatted()) {
              // required field isFormatted is not set
              return false;
            }
            return true;
          }
9049    
9050          public Builder mergeFrom(
9051              com.google.protobuf.CodedInputStream input,
9052              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
9053              throws java.io.IOException {
9054            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto parsedMessage = null;
9055            try {
9056              parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
9057            } catch (com.google.protobuf.InvalidProtocolBufferException e) {
9058              parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto) e.getUnfinishedMessage();
9059              throw e;
9060            } finally {
9061              if (parsedMessage != null) {
9062                mergeFrom(parsedMessage);
9063              }
9064            }
9065            return this;
9066          }
9067          private int bitField0_;
9068    
9069          // required bool isFormatted = 1;
          private boolean isFormatted_;
9071          /**
9072           * <code>required bool isFormatted = 1;</code>
9073           */
9074          public boolean hasIsFormatted() {
9075            return ((bitField0_ & 0x00000001) == 0x00000001);
9076          }
9077          /**
9078           * <code>required bool isFormatted = 1;</code>
9079           */
9080          public boolean getIsFormatted() {
9081            return isFormatted_;
9082          }
9083          /**
9084           * <code>required bool isFormatted = 1;</code>
9085           */
9086          public Builder setIsFormatted(boolean value) {
9087            bitField0_ |= 0x00000001;
9088            isFormatted_ = value;
9089            onChanged();
9090            return this;
9091          }
9092          /**
9093           * <code>required bool isFormatted = 1;</code>
9094           */
9095          public Builder clearIsFormatted() {
9096            bitField0_ = (bitField0_ & ~0x00000001);
9097            isFormatted_ = false;
9098            onChanged();
9099            return this;
9100          }
9101    
9102          // @@protoc_insertion_point(builder_scope:hadoop.hdfs.IsFormattedResponseProto)
9103        }
9104    
9105        static {
9106          defaultInstance = new IsFormattedResponseProto(true);
9107          defaultInstance.initFields();
9108        }
9109    
9110        // @@protoc_insertion_point(class_scope:hadoop.hdfs.IsFormattedResponseProto)
9111      }
9112    
9113      public interface GetJournalStateRequestProtoOrBuilder
9114          extends com.google.protobuf.MessageOrBuilder {
9115    
9116        // required .hadoop.hdfs.JournalIdProto jid = 1;
9117        /**
9118         * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
9119         */
9120        boolean hasJid();
9121        /**
9122         * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
9123         */
9124        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid();
9125        /**
9126         * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
9127         */
9128        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder();
9129      }
9130      /**
9131       * Protobuf type {@code hadoop.hdfs.GetJournalStateRequestProto}
9132       *
   * <pre>
   * getJournalState()
   * </pre>
9137       */
9138      public static final class GetJournalStateRequestProto extends
9139          com.google.protobuf.GeneratedMessage
9140          implements GetJournalStateRequestProtoOrBuilder {
9141        // Use GetJournalStateRequestProto.newBuilder() to construct.
9142        private GetJournalStateRequestProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
9143          super(builder);
9144          this.unknownFields = builder.getUnknownFields();
9145        }
9146        private GetJournalStateRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
9147    
9148        private static final GetJournalStateRequestProto defaultInstance;
9149        public static GetJournalStateRequestProto getDefaultInstance() {
9150          return defaultInstance;
9151        }
9152    
9153        public GetJournalStateRequestProto getDefaultInstanceForType() {
9154          return defaultInstance;
9155        }
9156    
9157        private final com.google.protobuf.UnknownFieldSet unknownFields;
9158        @java.lang.Override
9159        public final com.google.protobuf.UnknownFieldSet
9160            getUnknownFields() {
9161          return this.unknownFields;
9162        }
9163        private GetJournalStateRequestProto(
9164            com.google.protobuf.CodedInputStream input,
9165            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
9166            throws com.google.protobuf.InvalidProtocolBufferException {
9167          initFields();
9168          int mutable_bitField0_ = 0;
9169          com.google.protobuf.UnknownFieldSet.Builder unknownFields =
9170              com.google.protobuf.UnknownFieldSet.newBuilder();
9171          try {
9172            boolean done = false;
9173            while (!done) {
9174              int tag = input.readTag();
9175              switch (tag) {
9176                case 0:
9177                  done = true;
9178                  break;
9179                default: {
9180                  if (!parseUnknownField(input, unknownFields,
9181                                         extensionRegistry, tag)) {
9182                    done = true;
9183                  }
9184                  break;
9185                }
9186                case 10: {
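                  // tag 10 == (field number 1 << 3) | wire type 2
                  // (length-delimited), i.e. the nested JournalIdProto jid.
                  // A second occurrence on the wire is merged into the
                  // previously read value via a builder, per protobuf's
                  // merge semantics for embedded message fields.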
9187                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder subBuilder = null;
9188                  if (((bitField0_ & 0x00000001) == 0x00000001)) {
9189                    subBuilder = jid_.toBuilder();
9190                  }
9191                  jid_ = input.readMessage(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.PARSER, extensionRegistry);
9192                  if (subBuilder != null) {
9193                    subBuilder.mergeFrom(jid_);
9194                    jid_ = subBuilder.buildPartial();
9195                  }
9196                  bitField0_ |= 0x00000001;
9197                  break;
9198                }
9199              }
9200            }
9201          } catch (com.google.protobuf.InvalidProtocolBufferException e) {
9202            throw e.setUnfinishedMessage(this);
9203          } catch (java.io.IOException e) {
9204            throw new com.google.protobuf.InvalidProtocolBufferException(
9205                e.getMessage()).setUnfinishedMessage(this);
9206          } finally {
9207            this.unknownFields = unknownFields.build();
9208            makeExtensionsImmutable();
9209          }
9210        }
9211        public static final com.google.protobuf.Descriptors.Descriptor
9212            getDescriptor() {
9213          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_GetJournalStateRequestProto_descriptor;
9214        }
9215    
9216        protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
9217            internalGetFieldAccessorTable() {
9218          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_GetJournalStateRequestProto_fieldAccessorTable
9219              .ensureFieldAccessorsInitialized(
9220                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto.Builder.class);
9221        }
9222    
9223        public static com.google.protobuf.Parser<GetJournalStateRequestProto> PARSER =
9224            new com.google.protobuf.AbstractParser<GetJournalStateRequestProto>() {
9225          public GetJournalStateRequestProto parsePartialFrom(
9226              com.google.protobuf.CodedInputStream input,
9227              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
9228              throws com.google.protobuf.InvalidProtocolBufferException {
9229            return new GetJournalStateRequestProto(input, extensionRegistry);
9230          }
9231        };
9232    
9233        @java.lang.Override
9234        public com.google.protobuf.Parser<GetJournalStateRequestProto> getParserForType() {
9235          return PARSER;
9236        }
9237    
9238        private int bitField0_;
9239        // required .hadoop.hdfs.JournalIdProto jid = 1;
9240        public static final int JID_FIELD_NUMBER = 1;
9241        private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto jid_;
9242        /**
9243         * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
9244         */
9245        public boolean hasJid() {
9246          return ((bitField0_ & 0x00000001) == 0x00000001);
9247        }
9248        /**
9249         * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
9250         */
9251        public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid() {
9252          return jid_;
9253        }
9254        /**
9255         * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
9256         */
9257        public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder() {
9258          return jid_;
9259        }
9260    
9261        private void initFields() {
9262          jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
9263        }
        private byte memoizedIsInitialized = -1; // -1: unknown, 0: missing required fields, 1: fully initialized
9265        public final boolean isInitialized() {
9266          byte isInitialized = memoizedIsInitialized;
9267          if (isInitialized != -1) return isInitialized == 1;
9268    
9269          if (!hasJid()) {
9270            memoizedIsInitialized = 0;
9271            return false;
9272          }
9273          if (!getJid().isInitialized()) {
9274            memoizedIsInitialized = 0;
9275            return false;
9276          }
9277          memoizedIsInitialized = 1;
9278          return true;
9279        }
9280    
9281        public void writeTo(com.google.protobuf.CodedOutputStream output)
9282                            throws java.io.IOException {
9283          getSerializedSize();
9284          if (((bitField0_ & 0x00000001) == 0x00000001)) {
9285            output.writeMessage(1, jid_);
9286          }
9287          getUnknownFields().writeTo(output);
9288        }
9289    
        private int memoizedSerializedSize = -1; // -1 until computed by getSerializedSize()
9291        public int getSerializedSize() {
9292          int size = memoizedSerializedSize;
9293          if (size != -1) return size;
9294    
9295          size = 0;
9296          if (((bitField0_ & 0x00000001) == 0x00000001)) {
9297            size += com.google.protobuf.CodedOutputStream
9298              .computeMessageSize(1, jid_);
9299          }
9300          size += getUnknownFields().getSerializedSize();
9301          memoizedSerializedSize = size;
9302          return size;
9303        }
9304    
9305        private static final long serialVersionUID = 0L;
9306        @java.lang.Override
9307        protected java.lang.Object writeReplace()
9308            throws java.io.ObjectStreamException {
9309          return super.writeReplace();
9310        }
9311    
9312        @java.lang.Override
9313        public boolean equals(final java.lang.Object obj) {
          if (obj == this) {
            return true;
          }
9317          if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto)) {
9318            return super.equals(obj);
9319          }
9320          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto) obj;
9321    
9322          boolean result = true;
9323          result = result && (hasJid() == other.hasJid());
9324          if (hasJid()) {
9325            result = result && getJid()
9326                .equals(other.getJid());
9327          }
9328          result = result &&
9329              getUnknownFields().equals(other.getUnknownFields());
9330          return result;
9331        }
9332    
9333        private int memoizedHashCode = 0;
9334        @java.lang.Override
9335        public int hashCode() {
9336          if (memoizedHashCode != 0) {
9337            return memoizedHashCode;
9338          }
9339          int hash = 41;
9340          hash = (19 * hash) + getDescriptorForType().hashCode();
9341          if (hasJid()) {
9342            hash = (37 * hash) + JID_FIELD_NUMBER;
9343            hash = (53 * hash) + getJid().hashCode();
9344          }
9345          hash = (29 * hash) + getUnknownFields().hashCode();
9346          memoizedHashCode = hash;
9347          return hash;
9348        }
9349    
9350        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto parseFrom(
9351            com.google.protobuf.ByteString data)
9352            throws com.google.protobuf.InvalidProtocolBufferException {
9353          return PARSER.parseFrom(data);
9354        }
9355        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto parseFrom(
9356            com.google.protobuf.ByteString data,
9357            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
9358            throws com.google.protobuf.InvalidProtocolBufferException {
9359          return PARSER.parseFrom(data, extensionRegistry);
9360        }
9361        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto parseFrom(byte[] data)
9362            throws com.google.protobuf.InvalidProtocolBufferException {
9363          return PARSER.parseFrom(data);
9364        }
9365        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto parseFrom(
9366            byte[] data,
9367            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
9368            throws com.google.protobuf.InvalidProtocolBufferException {
9369          return PARSER.parseFrom(data, extensionRegistry);
9370        }
9371        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto parseFrom(java.io.InputStream input)
9372            throws java.io.IOException {
9373          return PARSER.parseFrom(input);
9374        }
9375        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto parseFrom(
9376            java.io.InputStream input,
9377            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
9378            throws java.io.IOException {
9379          return PARSER.parseFrom(input, extensionRegistry);
9380        }
9381        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto parseDelimitedFrom(java.io.InputStream input)
9382            throws java.io.IOException {
9383          return PARSER.parseDelimitedFrom(input);
9384        }
9385        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto parseDelimitedFrom(
9386            java.io.InputStream input,
9387            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
9388            throws java.io.IOException {
9389          return PARSER.parseDelimitedFrom(input, extensionRegistry);
9390        }
9391        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto parseFrom(
9392            com.google.protobuf.CodedInputStream input)
9393            throws java.io.IOException {
9394          return PARSER.parseFrom(input);
9395        }
9396        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto parseFrom(
9397            com.google.protobuf.CodedInputStream input,
9398            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
9399            throws java.io.IOException {
9400          return PARSER.parseFrom(input, extensionRegistry);
9401        }
9402    
9403        public static Builder newBuilder() { return Builder.create(); }
9404        public Builder newBuilderForType() { return newBuilder(); }
9405        public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto prototype) {
9406          return newBuilder().mergeFrom(prototype);
9407        }
9408        public Builder toBuilder() { return newBuilder(this); }
9409    
9410        @java.lang.Override
9411        protected Builder newBuilderForType(
9412            com.google.protobuf.GeneratedMessage.BuilderParent parent) {
9413          Builder builder = new Builder(parent);
9414          return builder;
9415        }
9416        /**
9417         * Protobuf type {@code hadoop.hdfs.GetJournalStateRequestProto}
9418         *
         * <pre>
         * getJournalState()
         * </pre>
9423         */
9424        public static final class Builder extends
9425            com.google.protobuf.GeneratedMessage.Builder<Builder>
9426           implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProtoOrBuilder {
9427          public static final com.google.protobuf.Descriptors.Descriptor
9428              getDescriptor() {
9429            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_GetJournalStateRequestProto_descriptor;
9430          }
9431    
9432          protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
9433              internalGetFieldAccessorTable() {
9434            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_GetJournalStateRequestProto_fieldAccessorTable
9435                .ensureFieldAccessorsInitialized(
9436                    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto.Builder.class);
9437          }
9438    
9439          // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto.newBuilder()
9440          private Builder() {
9441            maybeForceBuilderInitialization();
9442          }
9443    
9444          private Builder(
9445              com.google.protobuf.GeneratedMessage.BuilderParent parent) {
9446            super(parent);
9447            maybeForceBuilderInitialization();
9448          }
9449          private void maybeForceBuilderInitialization() {
9450            if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
9451              getJidFieldBuilder();
9452            }
9453          }
9454          private static Builder create() {
9455            return new Builder();
9456          }
9457    
9458          public Builder clear() {
9459            super.clear();
9460            if (jidBuilder_ == null) {
9461              jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
9462            } else {
9463              jidBuilder_.clear();
9464            }
9465            bitField0_ = (bitField0_ & ~0x00000001);
9466            return this;
9467          }
9468    
9469          public Builder clone() {
9470            return create().mergeFrom(buildPartial());
9471          }
9472    
9473          public com.google.protobuf.Descriptors.Descriptor
9474              getDescriptorForType() {
9475            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_GetJournalStateRequestProto_descriptor;
9476          }
9477    
9478          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto getDefaultInstanceForType() {
9479            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto.getDefaultInstance();
9480          }
9481    
9482          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto build() {
9483            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto result = buildPartial();
9484            if (!result.isInitialized()) {
9485              throw newUninitializedMessageException(result);
9486            }
9487            return result;
9488          }
9489    
9490          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto buildPartial() {
9491            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto(this);
9492            int from_bitField0_ = bitField0_;
9493            int to_bitField0_ = 0;
9494            if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
9495              to_bitField0_ |= 0x00000001;
9496            }
9497            if (jidBuilder_ == null) {
9498              result.jid_ = jid_;
9499            } else {
9500              result.jid_ = jidBuilder_.build();
9501            }
9502            result.bitField0_ = to_bitField0_;
9503            onBuilt();
9504            return result;
9505          }
9506    
9507          public Builder mergeFrom(com.google.protobuf.Message other) {
9508            if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto) {
9509              return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto)other);
9510            } else {
9511              super.mergeFrom(other);
9512              return this;
9513            }
9514          }
9515    
9516          public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto other) {
9517            if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto.getDefaultInstance()) return this;
9518            if (other.hasJid()) {
9519              mergeJid(other.getJid());
9520            }
9521            this.mergeUnknownFields(other.getUnknownFields());
9522            return this;
9523          }
9524    
          public final boolean isInitialized() {
            if (!hasJid()) {
              // required field jid is not set
              return false;
            }
            if (!getJid().isInitialized()) {
              // required field jid is set but not itself initialized
              return false;
            }
            return true;
          }
9536    
9537          public Builder mergeFrom(
9538              com.google.protobuf.CodedInputStream input,
9539              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
9540              throws java.io.IOException {
9541            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto parsedMessage = null;
9542            try {
9543              parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
9544            } catch (com.google.protobuf.InvalidProtocolBufferException e) {
9545              parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto) e.getUnfinishedMessage();
9546              throw e;
9547            } finally {
9548              if (parsedMessage != null) {
9549                mergeFrom(parsedMessage);
9550              }
9551            }
9552            return this;
9553          }
9554          private int bitField0_;
9555    
9556          // required .hadoop.hdfs.JournalIdProto jid = 1;
9557          private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
9558          private com.google.protobuf.SingleFieldBuilder<
9559              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder> jidBuilder_;
9560          /**
9561           * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
9562           */
9563          public boolean hasJid() {
9564            return ((bitField0_ & 0x00000001) == 0x00000001);
9565          }
9566          /**
9567           * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
9568           */
9569          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid() {
9570            if (jidBuilder_ == null) {
9571              return jid_;
9572            } else {
9573              return jidBuilder_.getMessage();
9574            }
9575          }
9576          /**
9577           * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
9578           */
9579          public Builder setJid(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto value) {
9580            if (jidBuilder_ == null) {
9581              if (value == null) {
9582                throw new NullPointerException();
9583              }
9584              jid_ = value;
9585              onChanged();
9586            } else {
9587              jidBuilder_.setMessage(value);
9588            }
9589            bitField0_ |= 0x00000001;
9590            return this;
9591          }
9592          /**
9593           * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
9594           */
9595          public Builder setJid(
9596              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder builderForValue) {
9597            if (jidBuilder_ == null) {
9598              jid_ = builderForValue.build();
9599              onChanged();
9600            } else {
9601              jidBuilder_.setMessage(builderForValue.build());
9602            }
9603            bitField0_ |= 0x00000001;
9604            return this;
9605          }
9606          /**
9607           * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
9608           */
9609          public Builder mergeJid(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto value) {
9610            if (jidBuilder_ == null) {
9611              if (((bitField0_ & 0x00000001) == 0x00000001) &&
9612                  jid_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance()) {
9613                jid_ =
9614                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.newBuilder(jid_).mergeFrom(value).buildPartial();
9615              } else {
9616                jid_ = value;
9617              }
9618              onChanged();
9619            } else {
9620              jidBuilder_.mergeFrom(value);
9621            }
9622            bitField0_ |= 0x00000001;
9623            return this;
9624          }
9625          /**
9626           * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
9627           */
9628          public Builder clearJid() {
9629            if (jidBuilder_ == null) {
9630              jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
9631              onChanged();
9632            } else {
9633              jidBuilder_.clear();
9634            }
9635            bitField0_ = (bitField0_ & ~0x00000001);
9636            return this;
9637          }
9638          /**
9639           * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
9640           */
9641          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder getJidBuilder() {
9642            bitField0_ |= 0x00000001;
9643            onChanged();
9644            return getJidFieldBuilder().getBuilder();
9645          }
9646          /**
9647           * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
9648           */
9649          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder() {
9650            if (jidBuilder_ != null) {
9651              return jidBuilder_.getMessageOrBuilder();
9652            } else {
9653              return jid_;
9654            }
9655          }
9656          /**
9657           * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
9658           */
9659          private com.google.protobuf.SingleFieldBuilder<
9660              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder> 
9661              getJidFieldBuilder() {
9662            if (jidBuilder_ == null) {
9663              jidBuilder_ = new com.google.protobuf.SingleFieldBuilder<
9664                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder>(
9665                      jid_,
9666                      getParentForChildren(),
9667                      isClean());
9668              jid_ = null;
9669            }
9670            return jidBuilder_;
9671          }
9672    
9673          // @@protoc_insertion_point(builder_scope:hadoop.hdfs.GetJournalStateRequestProto)
9674        }
9675    
9676        static {
9677          defaultInstance = new GetJournalStateRequestProto(true);
9678          defaultInstance.initFields();
9679        }
9680    
9681        // @@protoc_insertion_point(class_scope:hadoop.hdfs.GetJournalStateRequestProto)
9682      }
9683    
9684      public interface GetJournalStateResponseProtoOrBuilder
9685          extends com.google.protobuf.MessageOrBuilder {
9686    
9687        // required uint64 lastPromisedEpoch = 1;
9688        /**
9689         * <code>required uint64 lastPromisedEpoch = 1;</code>
9690         */
9691        boolean hasLastPromisedEpoch();
9692        /**
9693         * <code>required uint64 lastPromisedEpoch = 1;</code>
9694         */
9695        long getLastPromisedEpoch();
9696    
9697        // required uint32 httpPort = 2;
9698        /**
9699         * <code>required uint32 httpPort = 2;</code>
9700         *
9701         * <pre>
9702         * Deprecated by fromURL
9703         * </pre>
9704         */
9705        boolean hasHttpPort();
9706        /**
9707         * <code>required uint32 httpPort = 2;</code>
9708         *
9709         * <pre>
9710         * Deprecated by fromURL
9711         * </pre>
9712         */
9713        int getHttpPort();
9714    
9715        // optional string fromURL = 3;
9716        /**
9717         * <code>optional string fromURL = 3;</code>
9718         */
9719        boolean hasFromURL();
9720        /**
9721         * <code>optional string fromURL = 3;</code>
9722         */
9723        java.lang.String getFromURL();
9724        /**
9725         * <code>optional string fromURL = 3;</code>
9726         */
9727        com.google.protobuf.ByteString
9728            getFromURLBytes();
9729      }
9730      /**
9731       * Protobuf type {@code hadoop.hdfs.GetJournalStateResponseProto}
9732       */
9733      public static final class GetJournalStateResponseProto extends
9734          com.google.protobuf.GeneratedMessage
9735          implements GetJournalStateResponseProtoOrBuilder {
9736        // Use GetJournalStateResponseProto.newBuilder() to construct.
9737        private GetJournalStateResponseProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
9738          super(builder);
9739          this.unknownFields = builder.getUnknownFields();
9740        }
9741        private GetJournalStateResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
9742    
9743        private static final GetJournalStateResponseProto defaultInstance;
9744        public static GetJournalStateResponseProto getDefaultInstance() {
9745          return defaultInstance;
9746        }
9747    
9748        public GetJournalStateResponseProto getDefaultInstanceForType() {
9749          return defaultInstance;
9750        }
9751    
9752        private final com.google.protobuf.UnknownFieldSet unknownFields;
9753        @java.lang.Override
9754        public final com.google.protobuf.UnknownFieldSet
9755            getUnknownFields() {
9756          return this.unknownFields;
9757        }
9758        private GetJournalStateResponseProto(
9759            com.google.protobuf.CodedInputStream input,
9760            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
9761            throws com.google.protobuf.InvalidProtocolBufferException {
9762          initFields();
9763          int mutable_bitField0_ = 0;
9764          com.google.protobuf.UnknownFieldSet.Builder unknownFields =
9765              com.google.protobuf.UnknownFieldSet.newBuilder();
9766          try {
9767            boolean done = false;
9768            while (!done) {
9769              int tag = input.readTag();
9770              switch (tag) {
9771                case 0:
9772                  done = true;
9773                  break;
9774                default: {
9775                  if (!parseUnknownField(input, unknownFields,
9776                                         extensionRegistry, tag)) {
9777                    done = true;
9778                  }
9779                  break;
9780                }
9781                case 8: {
9782                  bitField0_ |= 0x00000001;
9783                  lastPromisedEpoch_ = input.readUInt64();
9784                  break;
9785                }
9786                case 16: {
9787                  bitField0_ |= 0x00000002;
9788                  httpPort_ = input.readUInt32();
9789                  break;
9790                }
9791                case 26: {
9792                  bitField0_ |= 0x00000004;
9793                  fromURL_ = input.readBytes();
9794                  break;
9795                }
9796              }
9797            }
9798          } catch (com.google.protobuf.InvalidProtocolBufferException e) {
9799            throw e.setUnfinishedMessage(this);
9800          } catch (java.io.IOException e) {
9801            throw new com.google.protobuf.InvalidProtocolBufferException(
9802                e.getMessage()).setUnfinishedMessage(this);
9803          } finally {
9804            this.unknownFields = unknownFields.build();
9805            makeExtensionsImmutable();
9806          }
9807        }
9808        public static final com.google.protobuf.Descriptors.Descriptor
9809            getDescriptor() {
9810          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_GetJournalStateResponseProto_descriptor;
9811        }
9812    
9813        protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
9814            internalGetFieldAccessorTable() {
9815          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_GetJournalStateResponseProto_fieldAccessorTable
9816              .ensureFieldAccessorsInitialized(
9817                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto.Builder.class);
9818        }
9819    
9820        public static com.google.protobuf.Parser<GetJournalStateResponseProto> PARSER =
9821            new com.google.protobuf.AbstractParser<GetJournalStateResponseProto>() {
9822          public GetJournalStateResponseProto parsePartialFrom(
9823              com.google.protobuf.CodedInputStream input,
9824              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
9825              throws com.google.protobuf.InvalidProtocolBufferException {
9826            return new GetJournalStateResponseProto(input, extensionRegistry);
9827          }
9828        };
9829    
9830        @java.lang.Override
9831        public com.google.protobuf.Parser<GetJournalStateResponseProto> getParserForType() {
9832          return PARSER;
9833        }
9834    
9835        private int bitField0_;
9836        // required uint64 lastPromisedEpoch = 1;
9837        public static final int LASTPROMISEDEPOCH_FIELD_NUMBER = 1;
9838        private long lastPromisedEpoch_;
9839        /**
9840         * <code>required uint64 lastPromisedEpoch = 1;</code>
9841         */
9842        public boolean hasLastPromisedEpoch() {
9843          return ((bitField0_ & 0x00000001) == 0x00000001);
9844        }
9845        /**
9846         * <code>required uint64 lastPromisedEpoch = 1;</code>
9847         */
9848        public long getLastPromisedEpoch() {
9849          return lastPromisedEpoch_;
9850        }
9851    
9852        // required uint32 httpPort = 2;
9853        public static final int HTTPPORT_FIELD_NUMBER = 2;
9854        private int httpPort_;
9855        /**
9856         * <code>required uint32 httpPort = 2;</code>
9857         *
9858         * <pre>
9859         * Deprecated by fromURL
9860         * </pre>
9861         */
9862        public boolean hasHttpPort() {
9863          return ((bitField0_ & 0x00000002) == 0x00000002);
9864        }
9865        /**
9866         * <code>required uint32 httpPort = 2;</code>
9867         *
9868         * <pre>
9869         * Deprecated by fromURL
9870         * </pre>
9871         */
9872        public int getHttpPort() {
9873          return httpPort_;
9874        }
9875    
9876        // optional string fromURL = 3;
9877        public static final int FROMURL_FIELD_NUMBER = 3;
9878        private java.lang.Object fromURL_;
9879        /**
9880         * <code>optional string fromURL = 3;</code>
9881         */
9882        public boolean hasFromURL() {
9883          return ((bitField0_ & 0x00000004) == 0x00000004);
9884        }
9885        /**
9886         * <code>optional string fromURL = 3;</code>
9887         */
9888        public java.lang.String getFromURL() {
9889          java.lang.Object ref = fromURL_;
9890          if (ref instanceof java.lang.String) {
9891            return (java.lang.String) ref;
9892          } else {
9893            com.google.protobuf.ByteString bs = 
9894                (com.google.protobuf.ByteString) ref;
9895            java.lang.String s = bs.toStringUtf8();
9896            if (bs.isValidUtf8()) {
9897              fromURL_ = s;
9898            }
9899            return s;
9900          }
9901        }
9902        /**
9903         * <code>optional string fromURL = 3;</code>
9904         */
9905        public com.google.protobuf.ByteString
9906            getFromURLBytes() {
9907          java.lang.Object ref = fromURL_;
9908          if (ref instanceof java.lang.String) {
9909            com.google.protobuf.ByteString b = 
9910                com.google.protobuf.ByteString.copyFromUtf8(
9911                    (java.lang.String) ref);
9912            fromURL_ = b;
9913            return b;
9914          } else {
9915            return (com.google.protobuf.ByteString) ref;
9916          }
9917        }
9918    
9919        private void initFields() {
9920          lastPromisedEpoch_ = 0L;
9921          httpPort_ = 0;
9922          fromURL_ = "";
9923        }
9924        private byte memoizedIsInitialized = -1;
9925        public final boolean isInitialized() {
9926          byte isInitialized = memoizedIsInitialized;
9927          if (isInitialized != -1) return isInitialized == 1;
9928    
9929          if (!hasLastPromisedEpoch()) {
9930            memoizedIsInitialized = 0;
9931            return false;
9932          }
9933          if (!hasHttpPort()) {
9934            memoizedIsInitialized = 0;
9935            return false;
9936          }
9937          memoizedIsInitialized = 1;
9938          return true;
9939        }
9940    
9941        public void writeTo(com.google.protobuf.CodedOutputStream output)
9942                            throws java.io.IOException {
9943          getSerializedSize();
9944          if (((bitField0_ & 0x00000001) == 0x00000001)) {
9945            output.writeUInt64(1, lastPromisedEpoch_);
9946          }
9947          if (((bitField0_ & 0x00000002) == 0x00000002)) {
9948            output.writeUInt32(2, httpPort_);
9949          }
9950          if (((bitField0_ & 0x00000004) == 0x00000004)) {
9951            output.writeBytes(3, getFromURLBytes());
9952          }
9953          getUnknownFields().writeTo(output);
9954        }
9955    
9956        private int memoizedSerializedSize = -1;
9957        public int getSerializedSize() {
9958          int size = memoizedSerializedSize;
9959          if (size != -1) return size;
9960    
9961          size = 0;
9962          if (((bitField0_ & 0x00000001) == 0x00000001)) {
9963            size += com.google.protobuf.CodedOutputStream
9964              .computeUInt64Size(1, lastPromisedEpoch_);
9965          }
9966          if (((bitField0_ & 0x00000002) == 0x00000002)) {
9967            size += com.google.protobuf.CodedOutputStream
9968              .computeUInt32Size(2, httpPort_);
9969          }
9970          if (((bitField0_ & 0x00000004) == 0x00000004)) {
9971            size += com.google.protobuf.CodedOutputStream
9972              .computeBytesSize(3, getFromURLBytes());
9973          }
9974          size += getUnknownFields().getSerializedSize();
9975          memoizedSerializedSize = size;
9976          return size;
9977        }
9978    
9979        private static final long serialVersionUID = 0L;
9980        @java.lang.Override
9981        protected java.lang.Object writeReplace()
9982            throws java.io.ObjectStreamException {
9983          return super.writeReplace();
9984        }
9985    
9986        @java.lang.Override
9987        public boolean equals(final java.lang.Object obj) {
9988          if (obj == this) {
        return true;
9990          }
9991          if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto)) {
9992            return super.equals(obj);
9993          }
9994          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto) obj;
9995    
9996          boolean result = true;
9997          result = result && (hasLastPromisedEpoch() == other.hasLastPromisedEpoch());
9998          if (hasLastPromisedEpoch()) {
9999            result = result && (getLastPromisedEpoch()
10000                == other.getLastPromisedEpoch());
10001          }
10002          result = result && (hasHttpPort() == other.hasHttpPort());
10003          if (hasHttpPort()) {
10004            result = result && (getHttpPort()
10005                == other.getHttpPort());
10006          }
10007          result = result && (hasFromURL() == other.hasFromURL());
10008          if (hasFromURL()) {
10009            result = result && getFromURL()
10010                .equals(other.getFromURL());
10011          }
10012          result = result &&
10013              getUnknownFields().equals(other.getUnknownFields());
10014          return result;
10015        }
10016    
10017        private int memoizedHashCode = 0;
10018        @java.lang.Override
10019        public int hashCode() {
10020          if (memoizedHashCode != 0) {
10021            return memoizedHashCode;
10022          }
10023          int hash = 41;
10024          hash = (19 * hash) + getDescriptorForType().hashCode();
10025          if (hasLastPromisedEpoch()) {
10026            hash = (37 * hash) + LASTPROMISEDEPOCH_FIELD_NUMBER;
10027            hash = (53 * hash) + hashLong(getLastPromisedEpoch());
10028          }
10029          if (hasHttpPort()) {
10030            hash = (37 * hash) + HTTPPORT_FIELD_NUMBER;
10031            hash = (53 * hash) + getHttpPort();
10032          }
10033          if (hasFromURL()) {
10034            hash = (37 * hash) + FROMURL_FIELD_NUMBER;
10035            hash = (53 * hash) + getFromURL().hashCode();
10036          }
10037          hash = (29 * hash) + getUnknownFields().hashCode();
10038          memoizedHashCode = hash;
10039          return hash;
10040        }
10041    
10042        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto parseFrom(
10043            com.google.protobuf.ByteString data)
10044            throws com.google.protobuf.InvalidProtocolBufferException {
10045          return PARSER.parseFrom(data);
10046        }
10047        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto parseFrom(
10048            com.google.protobuf.ByteString data,
10049            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
10050            throws com.google.protobuf.InvalidProtocolBufferException {
10051          return PARSER.parseFrom(data, extensionRegistry);
10052        }
10053        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto parseFrom(byte[] data)
10054            throws com.google.protobuf.InvalidProtocolBufferException {
10055          return PARSER.parseFrom(data);
10056        }
10057        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto parseFrom(
10058            byte[] data,
10059            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
10060            throws com.google.protobuf.InvalidProtocolBufferException {
10061          return PARSER.parseFrom(data, extensionRegistry);
10062        }
10063        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto parseFrom(java.io.InputStream input)
10064            throws java.io.IOException {
10065          return PARSER.parseFrom(input);
10066        }
10067        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto parseFrom(
10068            java.io.InputStream input,
10069            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
10070            throws java.io.IOException {
10071          return PARSER.parseFrom(input, extensionRegistry);
10072        }
10073        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto parseDelimitedFrom(java.io.InputStream input)
10074            throws java.io.IOException {
10075          return PARSER.parseDelimitedFrom(input);
10076        }
10077        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto parseDelimitedFrom(
10078            java.io.InputStream input,
10079            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
10080            throws java.io.IOException {
10081          return PARSER.parseDelimitedFrom(input, extensionRegistry);
10082        }
10083        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto parseFrom(
10084            com.google.protobuf.CodedInputStream input)
10085            throws java.io.IOException {
10086          return PARSER.parseFrom(input);
10087        }
10088        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto parseFrom(
10089            com.google.protobuf.CodedInputStream input,
10090            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
10091            throws java.io.IOException {
10092          return PARSER.parseFrom(input, extensionRegistry);
10093        }
10094    
10095        public static Builder newBuilder() { return Builder.create(); }
10096        public Builder newBuilderForType() { return newBuilder(); }
10097        public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto prototype) {
10098          return newBuilder().mergeFrom(prototype);
10099        }
10100        public Builder toBuilder() { return newBuilder(this); }
10101    
10102        @java.lang.Override
10103        protected Builder newBuilderForType(
10104            com.google.protobuf.GeneratedMessage.BuilderParent parent) {
10105          Builder builder = new Builder(parent);
10106          return builder;
10107        }
10108        /**
10109         * Protobuf type {@code hadoop.hdfs.GetJournalStateResponseProto}
10110         */
10111        public static final class Builder extends
10112            com.google.protobuf.GeneratedMessage.Builder<Builder>
10113           implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProtoOrBuilder {
10114          public static final com.google.protobuf.Descriptors.Descriptor
10115              getDescriptor() {
10116            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_GetJournalStateResponseProto_descriptor;
10117          }
10118    
10119          protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
10120              internalGetFieldAccessorTable() {
10121            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_GetJournalStateResponseProto_fieldAccessorTable
10122                .ensureFieldAccessorsInitialized(
10123                    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto.Builder.class);
10124          }
10125    
10126          // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto.newBuilder()
10127          private Builder() {
10128            maybeForceBuilderInitialization();
10129          }
10130    
10131          private Builder(
10132              com.google.protobuf.GeneratedMessage.BuilderParent parent) {
10133            super(parent);
10134            maybeForceBuilderInitialization();
10135          }
10136          private void maybeForceBuilderInitialization() {
10137            if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
10138            }
10139          }
10140          private static Builder create() {
10141            return new Builder();
10142          }
10143    
10144          public Builder clear() {
10145            super.clear();
10146            lastPromisedEpoch_ = 0L;
10147            bitField0_ = (bitField0_ & ~0x00000001);
10148            httpPort_ = 0;
10149            bitField0_ = (bitField0_ & ~0x00000002);
10150            fromURL_ = "";
10151            bitField0_ = (bitField0_ & ~0x00000004);
10152            return this;
10153          }
10154    
10155          public Builder clone() {
10156            return create().mergeFrom(buildPartial());
10157          }
10158    
10159          public com.google.protobuf.Descriptors.Descriptor
10160              getDescriptorForType() {
10161            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_GetJournalStateResponseProto_descriptor;
10162          }
10163    
10164          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto getDefaultInstanceForType() {
10165            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto.getDefaultInstance();
10166          }
10167    
10168          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto build() {
10169            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto result = buildPartial();
10170            if (!result.isInitialized()) {
10171              throw newUninitializedMessageException(result);
10172            }
10173            return result;
10174          }
10175    
10176          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto buildPartial() {
10177            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto(this);
10178            int from_bitField0_ = bitField0_;
10179            int to_bitField0_ = 0;
10180            if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
10181              to_bitField0_ |= 0x00000001;
10182            }
10183            result.lastPromisedEpoch_ = lastPromisedEpoch_;
10184            if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
10185              to_bitField0_ |= 0x00000002;
10186            }
10187            result.httpPort_ = httpPort_;
10188            if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
10189              to_bitField0_ |= 0x00000004;
10190            }
10191            result.fromURL_ = fromURL_;
10192            result.bitField0_ = to_bitField0_;
10193            onBuilt();
10194            return result;
10195          }
10196    
10197          public Builder mergeFrom(com.google.protobuf.Message other) {
10198            if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto) {
10199              return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto)other);
10200            } else {
10201              super.mergeFrom(other);
10202              return this;
10203            }
10204          }
10205    
10206          public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto other) {
10207            if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto.getDefaultInstance()) return this;
10208            if (other.hasLastPromisedEpoch()) {
10209              setLastPromisedEpoch(other.getLastPromisedEpoch());
10210            }
10211            if (other.hasHttpPort()) {
10212              setHttpPort(other.getHttpPort());
10213            }
10214            if (other.hasFromURL()) {
10215              bitField0_ |= 0x00000004;
10216              fromURL_ = other.fromURL_;
10217              onChanged();
10218            }
10219            this.mergeUnknownFields(other.getUnknownFields());
10220            return this;
10221          }
10222    
      public final boolean isInitialized() {
        if (!hasLastPromisedEpoch()) {
          return false;
        }
        if (!hasHttpPort()) {
          return false;
        }
        return true;
      }
10234    
10235          public Builder mergeFrom(
10236              com.google.protobuf.CodedInputStream input,
10237              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
10238              throws java.io.IOException {
10239            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto parsedMessage = null;
10240            try {
10241              parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
10242            } catch (com.google.protobuf.InvalidProtocolBufferException e) {
10243              parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto) e.getUnfinishedMessage();
10244              throw e;
10245            } finally {
10246              if (parsedMessage != null) {
10247                mergeFrom(parsedMessage);
10248              }
10249            }
10250            return this;
10251          }
10252          private int bitField0_;
10253    
10254          // required uint64 lastPromisedEpoch = 1;
10255          private long lastPromisedEpoch_ ;
10256          /**
10257           * <code>required uint64 lastPromisedEpoch = 1;</code>
10258           */
10259          public boolean hasLastPromisedEpoch() {
10260            return ((bitField0_ & 0x00000001) == 0x00000001);
10261          }
10262          /**
10263           * <code>required uint64 lastPromisedEpoch = 1;</code>
10264           */
10265          public long getLastPromisedEpoch() {
10266            return lastPromisedEpoch_;
10267          }
10268          /**
10269           * <code>required uint64 lastPromisedEpoch = 1;</code>
10270           */
10271          public Builder setLastPromisedEpoch(long value) {
10272            bitField0_ |= 0x00000001;
10273            lastPromisedEpoch_ = value;
10274            onChanged();
10275            return this;
10276          }
10277          /**
10278           * <code>required uint64 lastPromisedEpoch = 1;</code>
10279           */
10280          public Builder clearLastPromisedEpoch() {
10281            bitField0_ = (bitField0_ & ~0x00000001);
10282            lastPromisedEpoch_ = 0L;
10283            onChanged();
10284            return this;
10285          }
10286    
10287          // required uint32 httpPort = 2;
10288          private int httpPort_ ;
10289          /**
10290           * <code>required uint32 httpPort = 2;</code>
10291           *
10292           * <pre>
10293           * Deprecated by fromURL
10294           * </pre>
10295           */
10296          public boolean hasHttpPort() {
10297            return ((bitField0_ & 0x00000002) == 0x00000002);
10298          }
10299          /**
10300           * <code>required uint32 httpPort = 2;</code>
10301           *
10302           * <pre>
10303           * Deprecated by fromURL
10304           * </pre>
10305           */
10306          public int getHttpPort() {
10307            return httpPort_;
10308          }
10309          /**
10310           * <code>required uint32 httpPort = 2;</code>
10311           *
10312           * <pre>
10313           * Deprecated by fromURL
10314           * </pre>
10315           */
10316          public Builder setHttpPort(int value) {
10317            bitField0_ |= 0x00000002;
10318            httpPort_ = value;
10319            onChanged();
10320            return this;
10321          }
10322          /**
10323           * <code>required uint32 httpPort = 2;</code>
10324           *
10325           * <pre>
10326           * Deprecated by fromURL
10327           * </pre>
10328           */
10329          public Builder clearHttpPort() {
10330            bitField0_ = (bitField0_ & ~0x00000002);
10331            httpPort_ = 0;
10332            onChanged();
10333            return this;
10334          }
10335    
10336          // optional string fromURL = 3;
10337          private java.lang.Object fromURL_ = "";
10338          /**
10339           * <code>optional string fromURL = 3;</code>
10340           */
10341          public boolean hasFromURL() {
10342            return ((bitField0_ & 0x00000004) == 0x00000004);
10343          }
10344          /**
10345           * <code>optional string fromURL = 3;</code>
10346           */
10347          public java.lang.String getFromURL() {
10348            java.lang.Object ref = fromURL_;
10349            if (!(ref instanceof java.lang.String)) {
10350              java.lang.String s = ((com.google.protobuf.ByteString) ref)
10351                  .toStringUtf8();
10352              fromURL_ = s;
10353              return s;
10354            } else {
10355              return (java.lang.String) ref;
10356            }
10357          }
10358          /**
10359           * <code>optional string fromURL = 3;</code>
10360           */
10361          public com.google.protobuf.ByteString
10362              getFromURLBytes() {
10363            java.lang.Object ref = fromURL_;
        if (ref instanceof java.lang.String) {
10365              com.google.protobuf.ByteString b = 
10366                  com.google.protobuf.ByteString.copyFromUtf8(
10367                      (java.lang.String) ref);
10368              fromURL_ = b;
10369              return b;
10370            } else {
10371              return (com.google.protobuf.ByteString) ref;
10372            }
10373          }
10374          /**
10375           * <code>optional string fromURL = 3;</code>
10376           */
      public Builder setFromURL(
          java.lang.String value) {
        if (value == null) {
          throw new NullPointerException();
        }
        bitField0_ |= 0x00000004;
        fromURL_ = value;
        onChanged();
        return this;
      }
10387          /**
10388           * <code>optional string fromURL = 3;</code>
10389           */
10390          public Builder clearFromURL() {
10391            bitField0_ = (bitField0_ & ~0x00000004);
10392            fromURL_ = getDefaultInstance().getFromURL();
10393            onChanged();
10394            return this;
10395          }
10396          /**
10397           * <code>optional string fromURL = 3;</code>
10398           */
      public Builder setFromURLBytes(
          com.google.protobuf.ByteString value) {
        if (value == null) {
          throw new NullPointerException();
        }
        bitField0_ |= 0x00000004;
        fromURL_ = value;
        onChanged();
        return this;
      }
10409    
10410          // @@protoc_insertion_point(builder_scope:hadoop.hdfs.GetJournalStateResponseProto)
10411        }
10412    
10413        static {
10414          defaultInstance = new GetJournalStateResponseProto(true);
10415          defaultInstance.initFields();
10416        }
10417    
10418        // @@protoc_insertion_point(class_scope:hadoop.hdfs.GetJournalStateResponseProto)
10419      }
10420    
10421      public interface FormatRequestProtoOrBuilder
10422          extends com.google.protobuf.MessageOrBuilder {
10423    
10424        // required .hadoop.hdfs.JournalIdProto jid = 1;
10425        /**
10426         * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
10427         */
10428        boolean hasJid();
10429        /**
10430         * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
10431         */
10432        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid();
10433        /**
10434         * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
10435         */
10436        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder();
10437    
10438        // required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;
10439        /**
10440         * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
10441         */
10442        boolean hasNsInfo();
10443        /**
10444         * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
10445         */
10446        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto getNsInfo();
10447        /**
10448         * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
10449         */
10450        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProtoOrBuilder getNsInfoOrBuilder();
10451      }
10452      /**
10453       * Protobuf type {@code hadoop.hdfs.FormatRequestProto}
10454       *
10455       * <pre>
10456       **
10457       * format()
10458       * </pre>
10459       */
10460      public static final class FormatRequestProto extends
10461          com.google.protobuf.GeneratedMessage
10462          implements FormatRequestProtoOrBuilder {
10463        // Use FormatRequestProto.newBuilder() to construct.
10464        private FormatRequestProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
10465          super(builder);
10466          this.unknownFields = builder.getUnknownFields();
10467        }
10468        private FormatRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
10469    
10470        private static final FormatRequestProto defaultInstance;
10471        public static FormatRequestProto getDefaultInstance() {
10472          return defaultInstance;
10473        }
10474    
10475        public FormatRequestProto getDefaultInstanceForType() {
10476          return defaultInstance;
10477        }
10478    
10479        private final com.google.protobuf.UnknownFieldSet unknownFields;
10480        @java.lang.Override
10481        public final com.google.protobuf.UnknownFieldSet
10482            getUnknownFields() {
10483          return this.unknownFields;
10484        }
10485        private FormatRequestProto(
10486            com.google.protobuf.CodedInputStream input,
10487            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
10488            throws com.google.protobuf.InvalidProtocolBufferException {
10489          initFields();
10490          int mutable_bitField0_ = 0;
10491          com.google.protobuf.UnknownFieldSet.Builder unknownFields =
10492              com.google.protobuf.UnknownFieldSet.newBuilder();
10493          try {
10494            boolean done = false;
10495            while (!done) {
10496              int tag = input.readTag();
10497              switch (tag) {
10498                case 0:
10499                  done = true;
10500                  break;
10501                default: {
10502                  if (!parseUnknownField(input, unknownFields,
10503                                         extensionRegistry, tag)) {
10504                    done = true;
10505                  }
10506                  break;
10507                }
10508                case 10: {
10509                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder subBuilder = null;
10510                  if (((bitField0_ & 0x00000001) == 0x00000001)) {
10511                    subBuilder = jid_.toBuilder();
10512                  }
10513                  jid_ = input.readMessage(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.PARSER, extensionRegistry);
10514                  if (subBuilder != null) {
10515                    subBuilder.mergeFrom(jid_);
10516                    jid_ = subBuilder.buildPartial();
10517                  }
10518                  bitField0_ |= 0x00000001;
10519                  break;
10520                }
10521                case 18: {
10522                  org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.Builder subBuilder = null;
10523                  if (((bitField0_ & 0x00000002) == 0x00000002)) {
10524                    subBuilder = nsInfo_.toBuilder();
10525                  }
10526                  nsInfo_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.PARSER, extensionRegistry);
10527                  if (subBuilder != null) {
10528                    subBuilder.mergeFrom(nsInfo_);
10529                    nsInfo_ = subBuilder.buildPartial();
10530                  }
10531                  bitField0_ |= 0x00000002;
10532                  break;
10533                }
10534              }
10535            }
10536          } catch (com.google.protobuf.InvalidProtocolBufferException e) {
10537            throw e.setUnfinishedMessage(this);
10538          } catch (java.io.IOException e) {
10539            throw new com.google.protobuf.InvalidProtocolBufferException(
10540                e.getMessage()).setUnfinishedMessage(this);
10541          } finally {
10542            this.unknownFields = unknownFields.build();
10543            makeExtensionsImmutable();
10544          }
10545        }
10546        public static final com.google.protobuf.Descriptors.Descriptor
10547            getDescriptor() {
10548          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_FormatRequestProto_descriptor;
10549        }
10550    
10551        protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
10552            internalGetFieldAccessorTable() {
10553          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_FormatRequestProto_fieldAccessorTable
10554              .ensureFieldAccessorsInitialized(
10555                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto.Builder.class);
10556        }
10557    
10558        public static com.google.protobuf.Parser<FormatRequestProto> PARSER =
10559            new com.google.protobuf.AbstractParser<FormatRequestProto>() {
10560          public FormatRequestProto parsePartialFrom(
10561              com.google.protobuf.CodedInputStream input,
10562              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
10563              throws com.google.protobuf.InvalidProtocolBufferException {
10564            return new FormatRequestProto(input, extensionRegistry);
10565          }
10566        };
10567    
10568        @java.lang.Override
10569        public com.google.protobuf.Parser<FormatRequestProto> getParserForType() {
10570          return PARSER;
10571        }
10572    
10573        private int bitField0_;
10574        // required .hadoop.hdfs.JournalIdProto jid = 1;
10575        public static final int JID_FIELD_NUMBER = 1;
10576        private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto jid_;
10577        /**
10578         * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
10579         */
10580        public boolean hasJid() {
10581          return ((bitField0_ & 0x00000001) == 0x00000001);
10582        }
10583        /**
10584         * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
10585         */
10586        public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid() {
10587          return jid_;
10588        }
10589        /**
10590         * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
10591         */
10592        public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder() {
10593          return jid_;
10594        }
10595    
10596        // required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;
10597        public static final int NSINFO_FIELD_NUMBER = 2;
10598        private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto nsInfo_;
10599        /**
10600         * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
10601         */
10602        public boolean hasNsInfo() {
10603          return ((bitField0_ & 0x00000002) == 0x00000002);
10604        }
10605        /**
10606         * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
10607         */
10608        public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto getNsInfo() {
10609          return nsInfo_;
10610        }
10611        /**
10612         * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
10613         */
10614        public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProtoOrBuilder getNsInfoOrBuilder() {
10615          return nsInfo_;
10616        }
10617    
10618        private void initFields() {
10619          jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
10620          nsInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.getDefaultInstance();
10621        }
10622        private byte memoizedIsInitialized = -1;
10623        public final boolean isInitialized() {
10624          byte isInitialized = memoizedIsInitialized;
10625          if (isInitialized != -1) return isInitialized == 1;
10626    
10627          if (!hasJid()) {
10628            memoizedIsInitialized = 0;
10629            return false;
10630          }
10631          if (!hasNsInfo()) {
10632            memoizedIsInitialized = 0;
10633            return false;
10634          }
10635          if (!getJid().isInitialized()) {
10636            memoizedIsInitialized = 0;
10637            return false;
10638          }
10639          if (!getNsInfo().isInitialized()) {
10640            memoizedIsInitialized = 0;
10641            return false;
10642          }
10643          memoizedIsInitialized = 1;
10644          return true;
10645        }
10646    
10647        public void writeTo(com.google.protobuf.CodedOutputStream output)
10648                            throws java.io.IOException {
10649          getSerializedSize();
10650          if (((bitField0_ & 0x00000001) == 0x00000001)) {
10651            output.writeMessage(1, jid_);
10652          }
10653          if (((bitField0_ & 0x00000002) == 0x00000002)) {
10654            output.writeMessage(2, nsInfo_);
10655          }
10656          getUnknownFields().writeTo(output);
10657        }
10658    
10659        private int memoizedSerializedSize = -1;
10660        public int getSerializedSize() {
10661          int size = memoizedSerializedSize;
10662          if (size != -1) return size;
10663    
10664          size = 0;
10665          if (((bitField0_ & 0x00000001) == 0x00000001)) {
10666            size += com.google.protobuf.CodedOutputStream
10667              .computeMessageSize(1, jid_);
10668          }
10669          if (((bitField0_ & 0x00000002) == 0x00000002)) {
10670            size += com.google.protobuf.CodedOutputStream
10671              .computeMessageSize(2, nsInfo_);
10672          }
10673          size += getUnknownFields().getSerializedSize();
10674          memoizedSerializedSize = size;
10675          return size;
10676        }
10677    
10678        private static final long serialVersionUID = 0L;
10679        @java.lang.Override
10680        protected java.lang.Object writeReplace()
10681            throws java.io.ObjectStreamException {
10682          return super.writeReplace();
10683        }
10684    
10685        @java.lang.Override
10686        public boolean equals(final java.lang.Object obj) {
10687          if (obj == this) {
        return true;
10689          }
10690          if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto)) {
10691            return super.equals(obj);
10692          }
10693          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto) obj;
10694    
10695          boolean result = true;
10696          result = result && (hasJid() == other.hasJid());
10697          if (hasJid()) {
10698            result = result && getJid()
10699                .equals(other.getJid());
10700          }
10701          result = result && (hasNsInfo() == other.hasNsInfo());
10702          if (hasNsInfo()) {
10703            result = result && getNsInfo()
10704                .equals(other.getNsInfo());
10705          }
10706          result = result &&
10707              getUnknownFields().equals(other.getUnknownFields());
10708          return result;
10709        }
10710    
10711        private int memoizedHashCode = 0;
10712        @java.lang.Override
10713        public int hashCode() {
10714          if (memoizedHashCode != 0) {
10715            return memoizedHashCode;
10716          }
10717          int hash = 41;
10718          hash = (19 * hash) + getDescriptorForType().hashCode();
10719          if (hasJid()) {
10720            hash = (37 * hash) + JID_FIELD_NUMBER;
10721            hash = (53 * hash) + getJid().hashCode();
10722          }
10723          if (hasNsInfo()) {
10724            hash = (37 * hash) + NSINFO_FIELD_NUMBER;
10725            hash = (53 * hash) + getNsInfo().hashCode();
10726          }
10727          hash = (29 * hash) + getUnknownFields().hashCode();
10728          memoizedHashCode = hash;
10729          return hash;
10730        }
10731    
10732        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto parseFrom(
10733            com.google.protobuf.ByteString data)
10734            throws com.google.protobuf.InvalidProtocolBufferException {
10735          return PARSER.parseFrom(data);
10736        }
10737        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto parseFrom(
10738            com.google.protobuf.ByteString data,
10739            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
10740            throws com.google.protobuf.InvalidProtocolBufferException {
10741          return PARSER.parseFrom(data, extensionRegistry);
10742        }
10743        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto parseFrom(byte[] data)
10744            throws com.google.protobuf.InvalidProtocolBufferException {
10745          return PARSER.parseFrom(data);
10746        }
10747        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto parseFrom(
10748            byte[] data,
10749            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
10750            throws com.google.protobuf.InvalidProtocolBufferException {
10751          return PARSER.parseFrom(data, extensionRegistry);
10752        }
10753        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto parseFrom(java.io.InputStream input)
10754            throws java.io.IOException {
10755          return PARSER.parseFrom(input);
10756        }
10757        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto parseFrom(
10758            java.io.InputStream input,
10759            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
10760            throws java.io.IOException {
10761          return PARSER.parseFrom(input, extensionRegistry);
10762        }
10763        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto parseDelimitedFrom(java.io.InputStream input)
10764            throws java.io.IOException {
10765          return PARSER.parseDelimitedFrom(input);
10766        }
10767        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto parseDelimitedFrom(
10768            java.io.InputStream input,
10769            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
10770            throws java.io.IOException {
10771          return PARSER.parseDelimitedFrom(input, extensionRegistry);
10772        }
10773        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto parseFrom(
10774            com.google.protobuf.CodedInputStream input)
10775            throws java.io.IOException {
10776          return PARSER.parseFrom(input);
10777        }
10778        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto parseFrom(
10779            com.google.protobuf.CodedInputStream input,
10780            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
10781            throws java.io.IOException {
10782          return PARSER.parseFrom(input, extensionRegistry);
10783        }
10784    
10785        public static Builder newBuilder() { return Builder.create(); }
10786        public Builder newBuilderForType() { return newBuilder(); }
10787        public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto prototype) {
10788          return newBuilder().mergeFrom(prototype);
10789        }
10790        public Builder toBuilder() { return newBuilder(this); }
10791    
10792        @java.lang.Override
10793        protected Builder newBuilderForType(
10794            com.google.protobuf.GeneratedMessage.BuilderParent parent) {
10795          Builder builder = new Builder(parent);
10796          return builder;
10797        }
10798        /**
10799         * Protobuf type {@code hadoop.hdfs.FormatRequestProto}
10800         *
10801         * <pre>
10802         **
10803         * format()
10804         * </pre>
10805         */
10806        public static final class Builder extends
10807            com.google.protobuf.GeneratedMessage.Builder<Builder>
10808           implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProtoOrBuilder {
10809          public static final com.google.protobuf.Descriptors.Descriptor
10810              getDescriptor() {
10811            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_FormatRequestProto_descriptor;
10812          }
10813    
10814          protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
10815              internalGetFieldAccessorTable() {
10816            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_FormatRequestProto_fieldAccessorTable
10817                .ensureFieldAccessorsInitialized(
10818                    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto.Builder.class);
10819          }
10820    
10821          // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto.newBuilder()
10822          private Builder() {
10823            maybeForceBuilderInitialization();
10824          }
10825    
10826          private Builder(
10827              com.google.protobuf.GeneratedMessage.BuilderParent parent) {
10828            super(parent);
10829            maybeForceBuilderInitialization();
10830          }
10831          private void maybeForceBuilderInitialization() {
10832            if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
10833              getJidFieldBuilder();
10834              getNsInfoFieldBuilder();
10835            }
10836          }
10837          private static Builder create() {
10838            return new Builder();
10839          }
10840    
10841          public Builder clear() {
10842            super.clear();
10843            if (jidBuilder_ == null) {
10844              jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
10845            } else {
10846              jidBuilder_.clear();
10847            }
10848            bitField0_ = (bitField0_ & ~0x00000001);
10849            if (nsInfoBuilder_ == null) {
10850              nsInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.getDefaultInstance();
10851            } else {
10852              nsInfoBuilder_.clear();
10853            }
10854            bitField0_ = (bitField0_ & ~0x00000002);
10855            return this;
10856          }
10857    
10858          public Builder clone() {
10859            return create().mergeFrom(buildPartial());
10860          }
10861    
10862          public com.google.protobuf.Descriptors.Descriptor
10863              getDescriptorForType() {
10864            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_FormatRequestProto_descriptor;
10865          }
10866    
10867          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto getDefaultInstanceForType() {
10868            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto.getDefaultInstance();
10869          }
10870    
10871          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto build() {
10872            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto result = buildPartial();
10873            if (!result.isInitialized()) {
10874              throw newUninitializedMessageException(result);
10875            }
10876            return result;
10877          }
10878    
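               // buildPartial() copies the builder state into a new message
               // without verifying required fields; the presence bits gathered
               // into to_bitField0_ drive hasJid()/hasNsInfo() on the result.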
10879          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto buildPartial() {
10880            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto(this);
10881            int from_bitField0_ = bitField0_;
10882            int to_bitField0_ = 0;
10883            if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
10884              to_bitField0_ |= 0x00000001;
10885            }
10886            if (jidBuilder_ == null) {
10887              result.jid_ = jid_;
10888            } else {
10889              result.jid_ = jidBuilder_.build();
10890            }
10891            if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
10892              to_bitField0_ |= 0x00000002;
10893            }
10894            if (nsInfoBuilder_ == null) {
10895              result.nsInfo_ = nsInfo_;
10896            } else {
10897              result.nsInfo_ = nsInfoBuilder_.build();
10898            }
10899            result.bitField0_ = to_bitField0_;
10900            onBuilt();
10901            return result;
10902          }
10903    
10904          public Builder mergeFrom(com.google.protobuf.Message other) {
10905            if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto) {
10906              return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto)other);
10907            } else {
10908              super.mergeFrom(other);
10909              return this;
10910            }
10911          }
10912    
10913          public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto other) {
10914            if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto.getDefaultInstance()) return this;
10915            if (other.hasJid()) {
10916              mergeJid(other.getJid());
10917            }
10918            if (other.hasNsInfo()) {
10919              mergeNsInfo(other.getNsInfo());
10920            }
10921            this.mergeUnknownFields(other.getUnknownFields());
10922            return this;
10923          }
10924    
10925          public final boolean isInitialized() {
10926            if (!hasJid()) {
10928              return false;
10929            }
10930            if (!hasNsInfo()) {
10932              return false;
10933            }
10934            if (!getJid().isInitialized()) {
10936              return false;
10937            }
10938            if (!getNsInfo().isInitialized()) {
10940              return false;
10941            }
10942            return true;
10943          }
10944    
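               // Even when parsing fails mid-stream, any bytes already decoded
               // into parsedMessage are still merged into this builder by the
               // finally block, matching protobuf's partial-merge behavior.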
10945          public Builder mergeFrom(
10946              com.google.protobuf.CodedInputStream input,
10947              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
10948              throws java.io.IOException {
10949            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto parsedMessage = null;
10950            try {
10951              parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
10952            } catch (com.google.protobuf.InvalidProtocolBufferException e) {
10953              parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto) e.getUnfinishedMessage();
10954              throw e;
10955            } finally {
10956              if (parsedMessage != null) {
10957                mergeFrom(parsedMessage);
10958              }
10959            }
10960            return this;
10961          }
10962          private int bitField0_;
10963    
10964          // required .hadoop.hdfs.JournalIdProto jid = 1;
10965          private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
10966          private com.google.protobuf.SingleFieldBuilder<
10967              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder> jidBuilder_;
10968          /**
10969           * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
10970           */
10971          public boolean hasJid() {
10972            return ((bitField0_ & 0x00000001) == 0x00000001);
10973          }
10974          /**
10975           * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
10976           */
10977          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid() {
10978            if (jidBuilder_ == null) {
10979              return jid_;
10980            } else {
10981              return jidBuilder_.getMessage();
10982            }
10983          }
10984          /**
10985           * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
10986           */
10987          public Builder setJid(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto value) {
10988            if (jidBuilder_ == null) {
10989              if (value == null) {
10990                throw new NullPointerException();
10991              }
10992              jid_ = value;
10993              onChanged();
10994            } else {
10995              jidBuilder_.setMessage(value);
10996            }
10997            bitField0_ |= 0x00000001;
10998            return this;
10999          }
11000          /**
11001           * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
11002           */
11003          public Builder setJid(
11004              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder builderForValue) {
11005            if (jidBuilder_ == null) {
11006              jid_ = builderForValue.build();
11007              onChanged();
11008            } else {
11009              jidBuilder_.setMessage(builderForValue.build());
11010            }
11011            bitField0_ |= 0x00000001;
11012            return this;
11013          }
11014          /**
11015           * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
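                * <p>If jid is already set to a non-default value, the two
                * messages are merged field by field; otherwise {@code value}
                * simply replaces the current one.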
11016           */
11017          public Builder mergeJid(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto value) {
11018            if (jidBuilder_ == null) {
11019              if (((bitField0_ & 0x00000001) == 0x00000001) &&
11020                  jid_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance()) {
11021                jid_ =
11022                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.newBuilder(jid_).mergeFrom(value).buildPartial();
11023              } else {
11024                jid_ = value;
11025              }
11026              onChanged();
11027            } else {
11028              jidBuilder_.mergeFrom(value);
11029            }
11030            bitField0_ |= 0x00000001;
11031            return this;
11032          }
11033          /**
11034           * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
11035           */
11036          public Builder clearJid() {
11037            if (jidBuilder_ == null) {
11038              jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
11039              onChanged();
11040            } else {
11041              jidBuilder_.clear();
11042            }
11043            bitField0_ = (bitField0_ & ~0x00000001);
11044            return this;
11045          }
11046          /**
11047           * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
11048           */
11049          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder getJidBuilder() {
11050            bitField0_ |= 0x00000001;
11051            onChanged();
11052            return getJidFieldBuilder().getBuilder();
11053          }
11054          /**
11055           * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
11056           */
11057          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder() {
11058            if (jidBuilder_ != null) {
11059              return jidBuilder_.getMessageOrBuilder();
11060            } else {
11061              return jid_;
11062            }
11063          }
11064          /**
11065           * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
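                * <p>Lazily creates the nested field builder; once it exists,
                * jid_ is nulled out and all reads and writes go through it.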
11066           */
11067          private com.google.protobuf.SingleFieldBuilder<
11068              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder> 
11069              getJidFieldBuilder() {
11070            if (jidBuilder_ == null) {
11071              jidBuilder_ = new com.google.protobuf.SingleFieldBuilder<
11072                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder>(
11073                      jid_,
11074                      getParentForChildren(),
11075                      isClean());
11076              jid_ = null;
11077            }
11078            return jidBuilder_;
11079          }
11080    
11081          // required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;
11082          private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto nsInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.getDefaultInstance();
11083          private com.google.protobuf.SingleFieldBuilder<
11084              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProtoOrBuilder> nsInfoBuilder_;
11085          /**
11086           * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
11087           */
11088          public boolean hasNsInfo() {
11089            return ((bitField0_ & 0x00000002) == 0x00000002);
11090          }
11091          /**
11092           * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
11093           */
11094          public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto getNsInfo() {
11095            if (nsInfoBuilder_ == null) {
11096              return nsInfo_;
11097            } else {
11098              return nsInfoBuilder_.getMessage();
11099            }
11100          }
11101          /**
11102           * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
11103           */
11104          public Builder setNsInfo(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto value) {
11105            if (nsInfoBuilder_ == null) {
11106              if (value == null) {
11107                throw new NullPointerException();
11108              }
11109              nsInfo_ = value;
11110              onChanged();
11111            } else {
11112              nsInfoBuilder_.setMessage(value);
11113            }
11114            bitField0_ |= 0x00000002;
11115            return this;
11116          }
11117          /**
11118           * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
11119           */
11120          public Builder setNsInfo(
11121              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.Builder builderForValue) {
11122            if (nsInfoBuilder_ == null) {
11123              nsInfo_ = builderForValue.build();
11124              onChanged();
11125            } else {
11126              nsInfoBuilder_.setMessage(builderForValue.build());
11127            }
11128            bitField0_ |= 0x00000002;
11129            return this;
11130          }
11131          /**
11132           * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
11133           */
11134          public Builder mergeNsInfo(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto value) {
11135            if (nsInfoBuilder_ == null) {
11136              if (((bitField0_ & 0x00000002) == 0x00000002) &&
11137                  nsInfo_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.getDefaultInstance()) {
11138                nsInfo_ =
11139                  org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.newBuilder(nsInfo_).mergeFrom(value).buildPartial();
11140              } else {
11141                nsInfo_ = value;
11142              }
11143              onChanged();
11144            } else {
11145              nsInfoBuilder_.mergeFrom(value);
11146            }
11147            bitField0_ |= 0x00000002;
11148            return this;
11149          }
11150          /**
11151           * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
11152           */
11153          public Builder clearNsInfo() {
11154            if (nsInfoBuilder_ == null) {
11155              nsInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.getDefaultInstance();
11156              onChanged();
11157            } else {
11158              nsInfoBuilder_.clear();
11159            }
11160            bitField0_ = (bitField0_ & ~0x00000002);
11161            return this;
11162          }
11163          /**
11164           * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
11165           */
11166          public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.Builder getNsInfoBuilder() {
11167            bitField0_ |= 0x00000002;
11168            onChanged();
11169            return getNsInfoFieldBuilder().getBuilder();
11170          }
11171          /**
11172           * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
11173           */
11174          public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProtoOrBuilder getNsInfoOrBuilder() {
11175            if (nsInfoBuilder_ != null) {
11176              return nsInfoBuilder_.getMessageOrBuilder();
11177            } else {
11178              return nsInfo_;
11179            }
11180          }
11181          /**
11182           * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
11183           */
11184          private com.google.protobuf.SingleFieldBuilder<
11185              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProtoOrBuilder> 
11186              getNsInfoFieldBuilder() {
11187            if (nsInfoBuilder_ == null) {
11188              nsInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder<
11189                  org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProtoOrBuilder>(
11190                      nsInfo_,
11191                      getParentForChildren(),
11192                      isClean());
11193              nsInfo_ = null;
11194            }
11195            return nsInfoBuilder_;
11196          }
11197    
11198          // @@protoc_insertion_point(builder_scope:hadoop.hdfs.FormatRequestProto)
11199        }
11200    
11201        static {
11202          defaultInstance = new FormatRequestProto(true);
11203          defaultInstance.initFields();
11204        }
11205    
11206        // @@protoc_insertion_point(class_scope:hadoop.hdfs.FormatRequestProto)
11207      }
11208    
11209      public interface FormatResponseProtoOrBuilder
11210          extends com.google.protobuf.MessageOrBuilder {
11211      }
11212      /**
11213       * Protobuf type {@code hadoop.hdfs.FormatResponseProto}
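            * <p>An empty message; the format() response carries no fields.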
11214       */
11215      public static final class FormatResponseProto extends
11216          com.google.protobuf.GeneratedMessage
11217          implements FormatResponseProtoOrBuilder {
11218        // Use FormatResponseProto.newBuilder() to construct.
11219        private FormatResponseProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
11220          super(builder);
11221          this.unknownFields = builder.getUnknownFields();
11222        }
11223        private FormatResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
11224    
11225        private static final FormatResponseProto defaultInstance;
11226        public static FormatResponseProto getDefaultInstance() {
11227          return defaultInstance;
11228        }
11229    
11230        public FormatResponseProto getDefaultInstanceForType() {
11231          return defaultInstance;
11232        }
11233    
11234        private final com.google.protobuf.UnknownFieldSet unknownFields;
11235        @java.lang.Override
11236        public final com.google.protobuf.UnknownFieldSet
11237            getUnknownFields() {
11238          return this.unknownFields;
11239        }
11240        private FormatResponseProto(
11241            com.google.protobuf.CodedInputStream input,
11242            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
11243            throws com.google.protobuf.InvalidProtocolBufferException {
11244          initFields();
11245          com.google.protobuf.UnknownFieldSet.Builder unknownFields =
11246              com.google.protobuf.UnknownFieldSet.newBuilder();
11247          try {
11248            boolean done = false;
11249            while (!done) {
11250              int tag = input.readTag();
11251              switch (tag) {
11252                case 0:
11253                  done = true;
11254                  break;
11255                default: {
11256                  if (!parseUnknownField(input, unknownFields,
11257                                         extensionRegistry, tag)) {
11258                    done = true;
11259                  }
11260                  break;
11261                }
11262              }
11263            }
11264          } catch (com.google.protobuf.InvalidProtocolBufferException e) {
11265            throw e.setUnfinishedMessage(this);
11266          } catch (java.io.IOException e) {
11267            throw new com.google.protobuf.InvalidProtocolBufferException(
11268                e.getMessage()).setUnfinishedMessage(this);
11269          } finally {
11270            this.unknownFields = unknownFields.build();
11271            makeExtensionsImmutable();
11272          }
11273        }
11274        public static final com.google.protobuf.Descriptors.Descriptor
11275            getDescriptor() {
11276          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_FormatResponseProto_descriptor;
11277        }
11278    
11279        protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
11280            internalGetFieldAccessorTable() {
11281          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_FormatResponseProto_fieldAccessorTable
11282              .ensureFieldAccessorsInitialized(
11283                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto.Builder.class);
11284        }
11285    
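             // A stateless parser instance; all of the static parseFrom and
             // parseDelimitedFrom helpers below delegate to it.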
11286        public static com.google.protobuf.Parser<FormatResponseProto> PARSER =
11287            new com.google.protobuf.AbstractParser<FormatResponseProto>() {
11288          public FormatResponseProto parsePartialFrom(
11289              com.google.protobuf.CodedInputStream input,
11290              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
11291              throws com.google.protobuf.InvalidProtocolBufferException {
11292            return new FormatResponseProto(input, extensionRegistry);
11293          }
11294        };
11295    
11296        @java.lang.Override
11297        public com.google.protobuf.Parser<FormatResponseProto> getParserForType() {
11298          return PARSER;
11299        }
11300    
11301        private void initFields() {
11302        }
11303        private byte memoizedIsInitialized = -1;
11304        public final boolean isInitialized() {
11305          byte isInitialized = memoizedIsInitialized;
11306          if (isInitialized != -1) return isInitialized == 1;
11307    
11308          memoizedIsInitialized = 1;
11309          return true;
11310        }
11311    
11312        public void writeTo(com.google.protobuf.CodedOutputStream output)
11313                            throws java.io.IOException {
11314          getSerializedSize();
11315          getUnknownFields().writeTo(output);
11316        }
11317    
11318        private int memoizedSerializedSize = -1;
11319        public int getSerializedSize() {
11320          int size = memoizedSerializedSize;
11321          if (size != -1) return size;
11322    
11323          size = 0;
11324          size += getUnknownFields().getSerializedSize();
11325          memoizedSerializedSize = size;
11326          return size;
11327        }
11328    
11329        private static final long serialVersionUID = 0L;
11330        @java.lang.Override
11331        protected java.lang.Object writeReplace()
11332            throws java.io.ObjectStreamException {
11333          return super.writeReplace();
11334        }
11335    
11336        @java.lang.Override
11337        public boolean equals(final java.lang.Object obj) {
11338          if (obj == this) {
11339            return true;
11340          }
11341          if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto)) {
11342            return super.equals(obj);
11343          }
11344          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto) obj;
11345    
11346          boolean result = true;
11347          result = result &&
11348              getUnknownFields().equals(other.getUnknownFields());
11349          return result;
11350        }
11351    
11352        private int memoizedHashCode = 0;
11353        @java.lang.Override
11354        public int hashCode() {
11355          if (memoizedHashCode != 0) {
11356            return memoizedHashCode;
11357          }
11358          int hash = 41;
11359          hash = (19 * hash) + getDescriptorForType().hashCode();
11360          hash = (29 * hash) + getUnknownFields().hashCode();
11361          memoizedHashCode = hash;
11362          return hash;
11363        }
11364    
11365        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto parseFrom(
11366            com.google.protobuf.ByteString data)
11367            throws com.google.protobuf.InvalidProtocolBufferException {
11368          return PARSER.parseFrom(data);
11369        }
11370        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto parseFrom(
11371            com.google.protobuf.ByteString data,
11372            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
11373            throws com.google.protobuf.InvalidProtocolBufferException {
11374          return PARSER.parseFrom(data, extensionRegistry);
11375        }
11376        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto parseFrom(byte[] data)
11377            throws com.google.protobuf.InvalidProtocolBufferException {
11378          return PARSER.parseFrom(data);
11379        }
11380        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto parseFrom(
11381            byte[] data,
11382            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
11383            throws com.google.protobuf.InvalidProtocolBufferException {
11384          return PARSER.parseFrom(data, extensionRegistry);
11385        }
11386        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto parseFrom(java.io.InputStream input)
11387            throws java.io.IOException {
11388          return PARSER.parseFrom(input);
11389        }
11390        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto parseFrom(
11391            java.io.InputStream input,
11392            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
11393            throws java.io.IOException {
11394          return PARSER.parseFrom(input, extensionRegistry);
11395        }
11396        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto parseDelimitedFrom(java.io.InputStream input)
11397            throws java.io.IOException {
11398          return PARSER.parseDelimitedFrom(input);
11399        }
11400        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto parseDelimitedFrom(
11401            java.io.InputStream input,
11402            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
11403            throws java.io.IOException {
11404          return PARSER.parseDelimitedFrom(input, extensionRegistry);
11405        }
11406        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto parseFrom(
11407            com.google.protobuf.CodedInputStream input)
11408            throws java.io.IOException {
11409          return PARSER.parseFrom(input);
11410        }
11411        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto parseFrom(
11412            com.google.protobuf.CodedInputStream input,
11413            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
11414            throws java.io.IOException {
11415          return PARSER.parseFrom(input, extensionRegistry);
11416        }
11417    
11418        public static Builder newBuilder() { return Builder.create(); }
11419        public Builder newBuilderForType() { return newBuilder(); }
11420        public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto prototype) {
11421          return newBuilder().mergeFrom(prototype);
11422        }
11423        public Builder toBuilder() { return newBuilder(this); }
11424    
11425        @java.lang.Override
11426        protected Builder newBuilderForType(
11427            com.google.protobuf.GeneratedMessage.BuilderParent parent) {
11428          Builder builder = new Builder(parent);
11429          return builder;
11430        }
11431        /**
11432         * Protobuf type {@code hadoop.hdfs.FormatResponseProto}
11433         */
11434        public static final class Builder extends
11435            com.google.protobuf.GeneratedMessage.Builder<Builder>
11436           implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProtoOrBuilder {
11437          public static final com.google.protobuf.Descriptors.Descriptor
11438              getDescriptor() {
11439            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_FormatResponseProto_descriptor;
11440          }
11441    
11442          protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
11443              internalGetFieldAccessorTable() {
11444            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_FormatResponseProto_fieldAccessorTable
11445                .ensureFieldAccessorsInitialized(
11446                    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto.Builder.class);
11447          }
11448    
11449          // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto.newBuilder()
11450          private Builder() {
11451            maybeForceBuilderInitialization();
11452          }
11453    
11454          private Builder(
11455              com.google.protobuf.GeneratedMessage.BuilderParent parent) {
11456            super(parent);
11457            maybeForceBuilderInitialization();
11458          }
11459          private void maybeForceBuilderInitialization() {
11460            if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
11461            }
11462          }
11463          private static Builder create() {
11464            return new Builder();
11465          }
11466    
11467          public Builder clear() {
11468            super.clear();
11469            return this;
11470          }
11471    
11472          public Builder clone() {
11473            return create().mergeFrom(buildPartial());
11474          }
11475    
11476          public com.google.protobuf.Descriptors.Descriptor
11477              getDescriptorForType() {
11478            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_FormatResponseProto_descriptor;
11479          }
11480    
11481          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto getDefaultInstanceForType() {
11482            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto.getDefaultInstance();
11483          }
11484    
11485          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto build() {
11486            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto result = buildPartial();
11487            if (!result.isInitialized()) {
11488              throw newUninitializedMessageException(result);
11489            }
11490            return result;
11491          }
11492    
11493          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto buildPartial() {
11494            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto(this);
11495            onBuilt();
11496            return result;
11497          }
11498    
11499          public Builder mergeFrom(com.google.protobuf.Message other) {
11500            if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto) {
11501              return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto)other);
11502            } else {
11503              super.mergeFrom(other);
11504              return this;
11505            }
11506          }
11507    
11508          public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto other) {
11509            if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto.getDefaultInstance()) return this;
11510            this.mergeUnknownFields(other.getUnknownFields());
11511            return this;
11512          }
11513    
11514          public final boolean isInitialized() {
11515            return true;
11516          }
11517    
11518          public Builder mergeFrom(
11519              com.google.protobuf.CodedInputStream input,
11520              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
11521              throws java.io.IOException {
11522            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto parsedMessage = null;
11523            try {
11524              parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
11525            } catch (com.google.protobuf.InvalidProtocolBufferException e) {
11526              parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto) e.getUnfinishedMessage();
11527              throw e;
11528            } finally {
11529              if (parsedMessage != null) {
11530                mergeFrom(parsedMessage);
11531              }
11532            }
11533            return this;
11534          }
11535    
11536          // @@protoc_insertion_point(builder_scope:hadoop.hdfs.FormatResponseProto)
11537        }
11538    
11539        static {
11540          defaultInstance = new FormatResponseProto(true);
11541          defaultInstance.initFields();
11542        }
11543    
11544        // @@protoc_insertion_point(class_scope:hadoop.hdfs.FormatResponseProto)
11545      }
11546    
11547      public interface NewEpochRequestProtoOrBuilder
11548          extends com.google.protobuf.MessageOrBuilder {
11549    
11550        // required .hadoop.hdfs.JournalIdProto jid = 1;
11551        /**
11552         * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
11553         */
11554        boolean hasJid();
11555        /**
11556         * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
11557         */
11558        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid();
11559        /**
11560         * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
11561         */
11562        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder();
11563    
11564        // required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;
11565        /**
11566         * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
11567         */
11568        boolean hasNsInfo();
11569        /**
11570         * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
11571         */
11572        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto getNsInfo();
11573        /**
11574         * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
11575         */
11576        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProtoOrBuilder getNsInfoOrBuilder();
11577    
11578        // required uint64 epoch = 3;
11579        /**
11580         * <code>required uint64 epoch = 3;</code>
11581         */
11582        boolean hasEpoch();
11583        /**
11584         * <code>required uint64 epoch = 3;</code>
11585         */
11586        long getEpoch();
11587      }
11588      /**
11589       * Protobuf type {@code hadoop.hdfs.NewEpochRequestProto}
11590       *
11591       * <pre>
11593       * newEpoch()
11594       * </pre>
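            *
            * <p>A minimal usage sketch (all field values are hypothetical):
            * <pre>
            * NewEpochRequestProto req = NewEpochRequestProto.newBuilder()
            *     .setJid(JournalIdProto.newBuilder().setIdentifier("myjournal"))
            *     .setNsInfo(nsInfo)  // a NamespaceInfoProto built elsewhere
            *     .setEpoch(42L)
            *     .build();
            * </pre>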
11595       */
11596      public static final class NewEpochRequestProto extends
11597          com.google.protobuf.GeneratedMessage
11598          implements NewEpochRequestProtoOrBuilder {
11599        // Use NewEpochRequestProto.newBuilder() to construct.
11600        private NewEpochRequestProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
11601          super(builder);
11602          this.unknownFields = builder.getUnknownFields();
11603        }
11604        private NewEpochRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
11605    
11606        private static final NewEpochRequestProto defaultInstance;
11607        public static NewEpochRequestProto getDefaultInstance() {
11608          return defaultInstance;
11609        }
11610    
11611        public NewEpochRequestProto getDefaultInstanceForType() {
11612          return defaultInstance;
11613        }
11614    
11615        private final com.google.protobuf.UnknownFieldSet unknownFields;
11616        @java.lang.Override
11617        public final com.google.protobuf.UnknownFieldSet
11618            getUnknownFields() {
11619          return this.unknownFields;
11620        }
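             // Tags decoded below follow the wire format (field_number << 3 | wire_type):
             // 10 = field 1, length-delimited; 18 = field 2, length-delimited;
             // 24 = field 3, varint.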
11621        private NewEpochRequestProto(
11622            com.google.protobuf.CodedInputStream input,
11623            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
11624            throws com.google.protobuf.InvalidProtocolBufferException {
11625          initFields();
11626          int mutable_bitField0_ = 0;
11627          com.google.protobuf.UnknownFieldSet.Builder unknownFields =
11628              com.google.protobuf.UnknownFieldSet.newBuilder();
11629          try {
11630            boolean done = false;
11631            while (!done) {
11632              int tag = input.readTag();
11633              switch (tag) {
11634                case 0:
11635                  done = true;
11636                  break;
11637                default: {
11638                  if (!parseUnknownField(input, unknownFields,
11639                                         extensionRegistry, tag)) {
11640                    done = true;
11641                  }
11642                  break;
11643                }
11644                case 10: {
11645                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder subBuilder = null;
11646                  if (((bitField0_ & 0x00000001) == 0x00000001)) {
11647                    subBuilder = jid_.toBuilder();
11648                  }
11649                  jid_ = input.readMessage(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.PARSER, extensionRegistry);
11650                  if (subBuilder != null) {
11651                    subBuilder.mergeFrom(jid_);
11652                    jid_ = subBuilder.buildPartial();
11653                  }
11654                  bitField0_ |= 0x00000001;
11655                  break;
11656                }
11657                case 18: {
11658                  org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.Builder subBuilder = null;
11659                  if (((bitField0_ & 0x00000002) == 0x00000002)) {
11660                    subBuilder = nsInfo_.toBuilder();
11661                  }
11662                  nsInfo_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.PARSER, extensionRegistry);
11663                  if (subBuilder != null) {
11664                    subBuilder.mergeFrom(nsInfo_);
11665                    nsInfo_ = subBuilder.buildPartial();
11666                  }
11667                  bitField0_ |= 0x00000002;
11668                  break;
11669                }
11670                case 24: {
11671                  bitField0_ |= 0x00000004;
11672                  epoch_ = input.readUInt64();
11673                  break;
11674                }
11675              }
11676            }
11677          } catch (com.google.protobuf.InvalidProtocolBufferException e) {
11678            throw e.setUnfinishedMessage(this);
11679          } catch (java.io.IOException e) {
11680            throw new com.google.protobuf.InvalidProtocolBufferException(
11681                e.getMessage()).setUnfinishedMessage(this);
11682          } finally {
11683            this.unknownFields = unknownFields.build();
11684            makeExtensionsImmutable();
11685          }
11686        }
11687        public static final com.google.protobuf.Descriptors.Descriptor
11688            getDescriptor() {
11689          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_NewEpochRequestProto_descriptor;
11690        }
11691    
11692        protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
11693            internalGetFieldAccessorTable() {
11694          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_NewEpochRequestProto_fieldAccessorTable
11695              .ensureFieldAccessorsInitialized(
11696                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto.Builder.class);
11697        }
11698    
11699        public static com.google.protobuf.Parser<NewEpochRequestProto> PARSER =
11700            new com.google.protobuf.AbstractParser<NewEpochRequestProto>() {
11701          public NewEpochRequestProto parsePartialFrom(
11702              com.google.protobuf.CodedInputStream input,
11703              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
11704              throws com.google.protobuf.InvalidProtocolBufferException {
11705            return new NewEpochRequestProto(input, extensionRegistry);
11706          }
11707        };
11708    
11709        @java.lang.Override
11710        public com.google.protobuf.Parser<NewEpochRequestProto> getParserForType() {
11711          return PARSER;
11712        }
11713    
11714        private int bitField0_;
11715        // required .hadoop.hdfs.JournalIdProto jid = 1;
11716        public static final int JID_FIELD_NUMBER = 1;
11717        private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto jid_;
11718        /**
11719         * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
11720         */
11721        public boolean hasJid() {
11722          return ((bitField0_ & 0x00000001) == 0x00000001);
11723        }
11724        /**
11725         * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
11726         */
11727        public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid() {
11728          return jid_;
11729        }
11730        /**
11731         * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
11732         */
11733        public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder() {
11734          return jid_;
11735        }
11736    
11737        // required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;
11738        public static final int NSINFO_FIELD_NUMBER = 2;
11739        private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto nsInfo_;
11740        /**
11741         * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
11742         */
11743        public boolean hasNsInfo() {
11744          return ((bitField0_ & 0x00000002) == 0x00000002);
11745        }
11746        /**
11747         * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
11748         */
11749        public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto getNsInfo() {
11750          return nsInfo_;
11751        }
11752        /**
11753         * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
11754         */
11755        public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProtoOrBuilder getNsInfoOrBuilder() {
11756          return nsInfo_;
11757        }
11758    
11759        // required uint64 epoch = 3;
11760        public static final int EPOCH_FIELD_NUMBER = 3;
11761        private long epoch_;
11762        /**
11763         * <code>required uint64 epoch = 3;</code>
11764         */
11765        public boolean hasEpoch() {
11766          return ((bitField0_ & 0x00000004) == 0x00000004);
11767        }
11768        /**
11769         * <code>required uint64 epoch = 3;</code>
11770         */
11771        public long getEpoch() {
11772          return epoch_;
11773        }
11774    
11775        private void initFields() {
11776          jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
11777          nsInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.getDefaultInstance();
11778          epoch_ = 0L;
11779        }
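             // Sentinel values: -1 = not yet computed, 0 = a required field is
             // missing, 1 = fully initialized.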
11780        private byte memoizedIsInitialized = -1;
11781        public final boolean isInitialized() {
11782          byte isInitialized = memoizedIsInitialized;
11783          if (isInitialized != -1) return isInitialized == 1;
11784    
11785          if (!hasJid()) {
11786            memoizedIsInitialized = 0;
11787            return false;
11788          }
11789          if (!hasNsInfo()) {
11790            memoizedIsInitialized = 0;
11791            return false;
11792          }
11793          if (!hasEpoch()) {
11794            memoizedIsInitialized = 0;
11795            return false;
11796          }
11797          if (!getJid().isInitialized()) {
11798            memoizedIsInitialized = 0;
11799            return false;
11800          }
11801          if (!getNsInfo().isInitialized()) {
11802            memoizedIsInitialized = 0;
11803            return false;
11804          }
11805          memoizedIsInitialized = 1;
11806          return true;
11807        }
11808    
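             // getSerializedSize() is called first so the memoized sizes of
             // nested messages are populated before they are written out.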
11809        public void writeTo(com.google.protobuf.CodedOutputStream output)
11810                            throws java.io.IOException {
11811          getSerializedSize();
11812          if (((bitField0_ & 0x00000001) == 0x00000001)) {
11813            output.writeMessage(1, jid_);
11814          }
11815          if (((bitField0_ & 0x00000002) == 0x00000002)) {
11816            output.writeMessage(2, nsInfo_);
11817          }
11818          if (((bitField0_ & 0x00000004) == 0x00000004)) {
11819            output.writeUInt64(3, epoch_);
11820          }
11821          getUnknownFields().writeTo(output);
11822        }
11823    
11824        private int memoizedSerializedSize = -1;
11825        public int getSerializedSize() {
11826          int size = memoizedSerializedSize;
11827          if (size != -1) return size;
11828    
11829          size = 0;
11830          if (((bitField0_ & 0x00000001) == 0x00000001)) {
11831            size += com.google.protobuf.CodedOutputStream
11832              .computeMessageSize(1, jid_);
11833          }
11834          if (((bitField0_ & 0x00000002) == 0x00000002)) {
11835            size += com.google.protobuf.CodedOutputStream
11836              .computeMessageSize(2, nsInfo_);
11837          }
11838          if (((bitField0_ & 0x00000004) == 0x00000004)) {
11839            size += com.google.protobuf.CodedOutputStream
11840              .computeUInt64Size(3, epoch_);
11841          }
11842          size += getUnknownFields().getSerializedSize();
11843          memoizedSerializedSize = size;
11844          return size;
11845        }
11846    
11847        private static final long serialVersionUID = 0L;
11848        @java.lang.Override
11849        protected java.lang.Object writeReplace()
11850            throws java.io.ObjectStreamException {
11851          return super.writeReplace();
11852        }
11853    
11854        @java.lang.Override
11855        public boolean equals(final java.lang.Object obj) {
11856          if (obj == this) {
11857            return true;
11858          }
11859          if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto)) {
11860            return super.equals(obj);
11861          }
11862          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto) obj;
11863    
11864          boolean result = true;
11865          result = result && (hasJid() == other.hasJid());
11866          if (hasJid()) {
11867            result = result && getJid()
11868                .equals(other.getJid());
11869          }
11870          result = result && (hasNsInfo() == other.hasNsInfo());
11871          if (hasNsInfo()) {
11872            result = result && getNsInfo()
11873                .equals(other.getNsInfo());
11874          }
11875          result = result && (hasEpoch() == other.hasEpoch());
11876          if (hasEpoch()) {
11877            result = result && (getEpoch()
11878                == other.getEpoch());
11879          }
11880          result = result &&
11881              getUnknownFields().equals(other.getUnknownFields());
11882          return result;
11883        }
11884    
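             // hashCode() mixes the descriptor with each present field, keyed
             // by its field number, and caches the result (0 = not computed).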
11885        private int memoizedHashCode = 0;
11886        @java.lang.Override
11887        public int hashCode() {
11888          if (memoizedHashCode != 0) {
11889            return memoizedHashCode;
11890          }
11891          int hash = 41;
11892          hash = (19 * hash) + getDescriptorForType().hashCode();
11893          if (hasJid()) {
11894            hash = (37 * hash) + JID_FIELD_NUMBER;
11895            hash = (53 * hash) + getJid().hashCode();
11896          }
11897          if (hasNsInfo()) {
11898            hash = (37 * hash) + NSINFO_FIELD_NUMBER;
11899            hash = (53 * hash) + getNsInfo().hashCode();
11900          }
11901          if (hasEpoch()) {
11902            hash = (37 * hash) + EPOCH_FIELD_NUMBER;
11903            hash = (53 * hash) + hashLong(getEpoch());
11904          }
11905          hash = (29 * hash) + getUnknownFields().hashCode();
11906          memoizedHashCode = hash;
11907          return hash;
11908        }
11909    
11910        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto parseFrom(
11911            com.google.protobuf.ByteString data)
11912            throws com.google.protobuf.InvalidProtocolBufferException {
11913          return PARSER.parseFrom(data);
11914        }
11915        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto parseFrom(
11916            com.google.protobuf.ByteString data,
11917            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
11918            throws com.google.protobuf.InvalidProtocolBufferException {
11919          return PARSER.parseFrom(data, extensionRegistry);
11920        }
11921        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto parseFrom(byte[] data)
11922            throws com.google.protobuf.InvalidProtocolBufferException {
11923          return PARSER.parseFrom(data);
11924        }
11925        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto parseFrom(
11926            byte[] data,
11927            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
11928            throws com.google.protobuf.InvalidProtocolBufferException {
11929          return PARSER.parseFrom(data, extensionRegistry);
11930        }
11931        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto parseFrom(java.io.InputStream input)
11932            throws java.io.IOException {
11933          return PARSER.parseFrom(input);
11934        }
11935        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto parseFrom(
11936            java.io.InputStream input,
11937            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
11938            throws java.io.IOException {
11939          return PARSER.parseFrom(input, extensionRegistry);
11940        }
11941        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto parseDelimitedFrom(java.io.InputStream input)
11942            throws java.io.IOException {
11943          return PARSER.parseDelimitedFrom(input);
11944        }
11945        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto parseDelimitedFrom(
11946            java.io.InputStream input,
11947            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
11948            throws java.io.IOException {
11949          return PARSER.parseDelimitedFrom(input, extensionRegistry);
11950        }
11951        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto parseFrom(
11952            com.google.protobuf.CodedInputStream input)
11953            throws java.io.IOException {
11954          return PARSER.parseFrom(input);
11955        }
11956        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto parseFrom(
11957            com.google.protobuf.CodedInputStream input,
11958            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
11959            throws java.io.IOException {
11960          return PARSER.parseFrom(input, extensionRegistry);
11961        }
11962    
11963        public static Builder newBuilder() { return Builder.create(); }
11964        public Builder newBuilderForType() { return newBuilder(); }
11965        public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto prototype) {
11966          return newBuilder().mergeFrom(prototype);
11967        }
11968        public Builder toBuilder() { return newBuilder(this); }
11969    
11970        @java.lang.Override
11971        protected Builder newBuilderForType(
11972            com.google.protobuf.GeneratedMessage.BuilderParent parent) {
11973          Builder builder = new Builder(parent);
11974          return builder;
11975        }
11976        /**
11977         * Protobuf type {@code hadoop.hdfs.NewEpochRequestProto}
11978         *
11979         * <pre>
11981         * newEpoch()
11982         * </pre>
11983         */
11984        public static final class Builder extends
11985            com.google.protobuf.GeneratedMessage.Builder<Builder>
11986           implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProtoOrBuilder {
11987          public static final com.google.protobuf.Descriptors.Descriptor
11988              getDescriptor() {
11989            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_NewEpochRequestProto_descriptor;
11990          }
11991    
11992          protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
11993              internalGetFieldAccessorTable() {
11994            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_NewEpochRequestProto_fieldAccessorTable
11995                .ensureFieldAccessorsInitialized(
11996                    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto.Builder.class);
11997          }
11998    
11999          // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto.newBuilder()
12000          private Builder() {
12001            maybeForceBuilderInitialization();
12002          }
12003    
12004          private Builder(
12005              com.google.protobuf.GeneratedMessage.BuilderParent parent) {
12006            super(parent);
12007            maybeForceBuilderInitialization();
12008          }
12009          private void maybeForceBuilderInitialization() {
12010            if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
12011              getJidFieldBuilder();
12012              getNsInfoFieldBuilder();
12013            }
12014          }
12015          private static Builder create() {
12016            return new Builder();
12017          }
12018    
12019          public Builder clear() {
12020            super.clear();
12021            if (jidBuilder_ == null) {
12022              jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
12023            } else {
12024              jidBuilder_.clear();
12025            }
12026            bitField0_ = (bitField0_ & ~0x00000001);
12027            if (nsInfoBuilder_ == null) {
12028              nsInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.getDefaultInstance();
12029            } else {
12030              nsInfoBuilder_.clear();
12031            }
12032            bitField0_ = (bitField0_ & ~0x00000002);
12033            epoch_ = 0L;
12034            bitField0_ = (bitField0_ & ~0x00000004);
12035            return this;
12036          }
12037    
12038          public Builder clone() {
12039            return create().mergeFrom(buildPartial());
12040          }
12041    
12042          public com.google.protobuf.Descriptors.Descriptor
12043              getDescriptorForType() {
12044            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_NewEpochRequestProto_descriptor;
12045          }
12046    
12047          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto getDefaultInstanceForType() {
12048            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto.getDefaultInstance();
12049          }
12050    
12051          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto build() {
12052            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto result = buildPartial();
12053            if (!result.isInitialized()) {
12054              throw newUninitializedMessageException(result);
12055            }
12056            return result;
12057          }
12058    
12059          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto buildPartial() {
12060            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto(this);
12061            int from_bitField0_ = bitField0_;
12062            int to_bitField0_ = 0;
12063            if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
12064              to_bitField0_ |= 0x00000001;
12065            }
12066            if (jidBuilder_ == null) {
12067              result.jid_ = jid_;
12068            } else {
12069              result.jid_ = jidBuilder_.build();
12070            }
12071            if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
12072              to_bitField0_ |= 0x00000002;
12073            }
12074            if (nsInfoBuilder_ == null) {
12075              result.nsInfo_ = nsInfo_;
12076            } else {
12077              result.nsInfo_ = nsInfoBuilder_.build();
12078            }
12079            if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
12080              to_bitField0_ |= 0x00000004;
12081            }
12082            result.epoch_ = epoch_;
12083            result.bitField0_ = to_bitField0_;
12084            onBuilt();
12085            return result;
12086          }
12087    
12088          public Builder mergeFrom(com.google.protobuf.Message other) {
12089            if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto) {
12090              return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto)other);
12091            } else {
12092              super.mergeFrom(other);
12093              return this;
12094            }
12095          }
12096    
12097          public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto other) {
12098            if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto.getDefaultInstance()) return this;
12099            if (other.hasJid()) {
12100              mergeJid(other.getJid());
12101            }
12102            if (other.hasNsInfo()) {
12103              mergeNsInfo(other.getNsInfo());
12104            }
12105            if (other.hasEpoch()) {
12106              setEpoch(other.getEpoch());
12107            }
12108            this.mergeUnknownFields(other.getUnknownFields());
12109            return this;
12110          }
12111    
12112          public final boolean isInitialized() {
12113            if (!hasJid()) {
12115              return false;
12116            }
12117            if (!hasNsInfo()) {
12119              return false;
12120            }
12121            if (!hasEpoch()) {
12123              return false;
12124            }
12125            if (!getJid().isInitialized()) {
12127              return false;
12128            }
12129            if (!getNsInfo().isInitialized()) {
12131              return false;
12132            }
12133            return true;
12134          }
12135    
12136          public Builder mergeFrom(
12137              com.google.protobuf.CodedInputStream input,
12138              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
12139              throws java.io.IOException {
12140            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto parsedMessage = null;
12141            try {
12142              parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
12143            } catch (com.google.protobuf.InvalidProtocolBufferException e) {
12144              parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto) e.getUnfinishedMessage();
12145              throw e;
12146            } finally {
12147              if (parsedMessage != null) {
12148                mergeFrom(parsedMessage);
12149              }
12150            }
12151            return this;
12152          }
12153          private int bitField0_;
12154    
12155          // required .hadoop.hdfs.JournalIdProto jid = 1;
12156          private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
12157          private com.google.protobuf.SingleFieldBuilder<
12158              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder> jidBuilder_;
12159          /**
12160           * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
12161           */
12162          public boolean hasJid() {
12163            return ((bitField0_ & 0x00000001) == 0x00000001);
12164          }
12165          /**
12166           * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
12167           */
12168          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid() {
12169            if (jidBuilder_ == null) {
12170              return jid_;
12171            } else {
12172              return jidBuilder_.getMessage();
12173            }
12174          }
12175          /**
12176           * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
12177           */
12178          public Builder setJid(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto value) {
12179            if (jidBuilder_ == null) {
12180              if (value == null) {
12181                throw new NullPointerException();
12182              }
12183              jid_ = value;
12184              onChanged();
12185            } else {
12186              jidBuilder_.setMessage(value);
12187            }
12188            bitField0_ |= 0x00000001;
12189            return this;
12190          }
12191          /**
12192           * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
12193           */
12194          public Builder setJid(
12195              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder builderForValue) {
12196            if (jidBuilder_ == null) {
12197              jid_ = builderForValue.build();
12198              onChanged();
12199            } else {
12200              jidBuilder_.setMessage(builderForValue.build());
12201            }
12202            bitField0_ |= 0x00000001;
12203            return this;
12204          }
12205          /**
12206           * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
12207           */
12208          public Builder mergeJid(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto value) {
12209            if (jidBuilder_ == null) {
12210              if (((bitField0_ & 0x00000001) == 0x00000001) &&
12211                  jid_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance()) {
12212                jid_ =
12213                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.newBuilder(jid_).mergeFrom(value).buildPartial();
12214              } else {
12215                jid_ = value;
12216              }
12217              onChanged();
12218            } else {
12219              jidBuilder_.mergeFrom(value);
12220            }
12221            bitField0_ |= 0x00000001;
12222            return this;
12223          }
12224          /**
12225           * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
12226           */
12227          public Builder clearJid() {
12228            if (jidBuilder_ == null) {
12229              jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
12230              onChanged();
12231            } else {
12232              jidBuilder_.clear();
12233            }
12234            bitField0_ = (bitField0_ & ~0x00000001);
12235            return this;
12236          }
12237          /**
12238           * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
12239           */
12240          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder getJidBuilder() {
12241            bitField0_ |= 0x00000001;
12242            onChanged();
12243            return getJidFieldBuilder().getBuilder();
12244          }
12245          /**
12246           * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
12247           */
12248          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder() {
12249            if (jidBuilder_ != null) {
12250              return jidBuilder_.getMessageOrBuilder();
12251            } else {
12252              return jid_;
12253            }
12254          }
12255          /**
12256           * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
12257           */
12258          private com.google.protobuf.SingleFieldBuilder<
12259              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder> 
12260              getJidFieldBuilder() {
12261            if (jidBuilder_ == null) {
12262              jidBuilder_ = new com.google.protobuf.SingleFieldBuilder<
12263                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder>(
12264                      jid_,
12265                      getParentForChildren(),
12266                      isClean());
12267              jid_ = null;
12268            }
12269            return jidBuilder_;
12270          }
12271    
12272          // required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;
12273          private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto nsInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.getDefaultInstance();
12274          private com.google.protobuf.SingleFieldBuilder<
12275              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProtoOrBuilder> nsInfoBuilder_;
12276          /**
12277           * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
12278           */
12279          public boolean hasNsInfo() {
12280            return ((bitField0_ & 0x00000002) == 0x00000002);
12281          }
12282          /**
12283           * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
12284           */
12285          public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto getNsInfo() {
12286            if (nsInfoBuilder_ == null) {
12287              return nsInfo_;
12288            } else {
12289              return nsInfoBuilder_.getMessage();
12290            }
12291          }
12292          /**
12293           * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
12294           */
12295          public Builder setNsInfo(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto value) {
12296            if (nsInfoBuilder_ == null) {
12297              if (value == null) {
12298                throw new NullPointerException();
12299              }
12300              nsInfo_ = value;
12301              onChanged();
12302            } else {
12303              nsInfoBuilder_.setMessage(value);
12304            }
12305            bitField0_ |= 0x00000002;
12306            return this;
12307          }
12308          /**
12309           * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
12310           */
12311          public Builder setNsInfo(
12312              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.Builder builderForValue) {
12313            if (nsInfoBuilder_ == null) {
12314              nsInfo_ = builderForValue.build();
12315              onChanged();
12316            } else {
12317              nsInfoBuilder_.setMessage(builderForValue.build());
12318            }
12319            bitField0_ |= 0x00000002;
12320            return this;
12321          }
12322          /**
12323           * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
12324           */
12325          public Builder mergeNsInfo(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto value) {
12326            if (nsInfoBuilder_ == null) {
12327              if (((bitField0_ & 0x00000002) == 0x00000002) &&
12328                  nsInfo_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.getDefaultInstance()) {
12329                nsInfo_ =
12330                  org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.newBuilder(nsInfo_).mergeFrom(value).buildPartial();
12331              } else {
12332                nsInfo_ = value;
12333              }
12334              onChanged();
12335            } else {
12336              nsInfoBuilder_.mergeFrom(value);
12337            }
12338            bitField0_ |= 0x00000002;
12339            return this;
12340          }
12341          /**
12342           * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
12343           */
12344          public Builder clearNsInfo() {
12345            if (nsInfoBuilder_ == null) {
12346              nsInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.getDefaultInstance();
12347              onChanged();
12348            } else {
12349              nsInfoBuilder_.clear();
12350            }
12351            bitField0_ = (bitField0_ & ~0x00000002);
12352            return this;
12353          }
12354          /**
12355           * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
12356           */
12357          public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.Builder getNsInfoBuilder() {
12358            bitField0_ |= 0x00000002;
12359            onChanged();
12360            return getNsInfoFieldBuilder().getBuilder();
12361          }
12362          /**
12363           * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
12364           */
12365          public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProtoOrBuilder getNsInfoOrBuilder() {
12366            if (nsInfoBuilder_ != null) {
12367              return nsInfoBuilder_.getMessageOrBuilder();
12368            } else {
12369              return nsInfo_;
12370            }
12371          }
12372          /**
12373           * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
12374           */
12375          private com.google.protobuf.SingleFieldBuilder<
12376              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProtoOrBuilder> 
12377              getNsInfoFieldBuilder() {
12378            if (nsInfoBuilder_ == null) {
12379              nsInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder<
12380                  org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProtoOrBuilder>(
12381                      nsInfo_,
12382                      getParentForChildren(),
12383                      isClean());
12384              nsInfo_ = null;
12385            }
12386            return nsInfoBuilder_;
12387          }
12388    
12389          // required uint64 epoch = 3;
12390          private long epoch_;
12391          /**
12392           * <code>required uint64 epoch = 3;</code>
12393           */
12394          public boolean hasEpoch() {
12395            return ((bitField0_ & 0x00000004) == 0x00000004);
12396          }
12397          /**
12398           * <code>required uint64 epoch = 3;</code>
12399           */
12400          public long getEpoch() {
12401            return epoch_;
12402          }
12403          /**
12404           * <code>required uint64 epoch = 3;</code>
12405           */
12406          public Builder setEpoch(long value) {
12407            bitField0_ |= 0x00000004;
12408            epoch_ = value;
12409            onChanged();
12410            return this;
12411          }
12412          /**
12413           * <code>required uint64 epoch = 3;</code>
12414           */
12415          public Builder clearEpoch() {
12416            bitField0_ = (bitField0_ & ~0x00000004);
12417            epoch_ = 0L;
12418            onChanged();
12419            return this;
12420          }
12421    
12422          // @@protoc_insertion_point(builder_scope:hadoop.hdfs.NewEpochRequestProto)
12423        }
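
         // Illustrative usage sketch (added by hand, not protoc output): builds a
         // NewEpochRequestProto with the generated Builder above. The journal id
         // and epoch are made-up placeholders; the required nsInfo field is left
         // unset here, so buildPartial() is used instead of build(), which would
         // throw an UninitializedMessageException for the missing required field.
         private static NewEpochRequestProto exampleRequest() {
           JournalIdProto jid = JournalIdProto.newBuilder()
               .setIdentifier("myjournal")     // required string identifier = 1
               .build();
           return newBuilder()
               .setJid(jid)                    // required .hadoop.hdfs.JournalIdProto jid = 1
               .setEpoch(7L)                   // required uint64 epoch = 3
               .buildPartial();                // nsInfo intentionally omitted in this sketch
         }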
12424    
12425        static {
12426          defaultInstance = new NewEpochRequestProto(true);
12427          defaultInstance.initFields();
12428        }
12429    
12430        // @@protoc_insertion_point(class_scope:hadoop.hdfs.NewEpochRequestProto)
12431      }
12432    
12433      public interface NewEpochResponseProtoOrBuilder
12434          extends com.google.protobuf.MessageOrBuilder {
12435    
12436        // optional uint64 lastSegmentTxId = 1;
12437        /**
12438         * <code>optional uint64 lastSegmentTxId = 1;</code>
12439         */
12440        boolean hasLastSegmentTxId();
12441        /**
12442         * <code>optional uint64 lastSegmentTxId = 1;</code>
12443         */
12444        long getLastSegmentTxId();
12445      }
12446      /**
12447       * Protobuf type {@code hadoop.hdfs.NewEpochResponseProto}
12448       */
12449      public static final class NewEpochResponseProto extends
12450          com.google.protobuf.GeneratedMessage
12451          implements NewEpochResponseProtoOrBuilder {
12452        // Use NewEpochResponseProto.newBuilder() to construct.
12453        private NewEpochResponseProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
12454          super(builder);
12455          this.unknownFields = builder.getUnknownFields();
12456        }
12457        private NewEpochResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
12458    
12459        private static final NewEpochResponseProto defaultInstance;
12460        public static NewEpochResponseProto getDefaultInstance() {
12461          return defaultInstance;
12462        }
12463    
12464        public NewEpochResponseProto getDefaultInstanceForType() {
12465          return defaultInstance;
12466        }
12467    
12468        private final com.google.protobuf.UnknownFieldSet unknownFields;
12469        @java.lang.Override
12470        public final com.google.protobuf.UnknownFieldSet
12471            getUnknownFields() {
12472          return this.unknownFields;
12473        }
12474        private NewEpochResponseProto(
12475            com.google.protobuf.CodedInputStream input,
12476            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
12477            throws com.google.protobuf.InvalidProtocolBufferException {
12478          initFields();
12479          int mutable_bitField0_ = 0;
12480          com.google.protobuf.UnknownFieldSet.Builder unknownFields =
12481              com.google.protobuf.UnknownFieldSet.newBuilder();
12482          try {
12483            boolean done = false;
12484            while (!done) {
12485              int tag = input.readTag();
12486              switch (tag) {
12487                case 0:
12488                  done = true;
12489                  break;
12490                default: {
12491                  if (!parseUnknownField(input, unknownFields,
12492                                         extensionRegistry, tag)) {
12493                    done = true;
12494                  }
12495                  break;
12496                }
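                     // tag 8 = (field 1 << 3) | wiretype 0 (varint) -> lastSegmentTxId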
12497                case 8: {
12498                  bitField0_ |= 0x00000001;
12499                  lastSegmentTxId_ = input.readUInt64();
12500                  break;
12501                }
12502              }
12503            }
12504          } catch (com.google.protobuf.InvalidProtocolBufferException e) {
12505            throw e.setUnfinishedMessage(this);
12506          } catch (java.io.IOException e) {
12507            throw new com.google.protobuf.InvalidProtocolBufferException(
12508                e.getMessage()).setUnfinishedMessage(this);
12509          } finally {
12510            this.unknownFields = unknownFields.build();
12511            makeExtensionsImmutable();
12512          }
12513        }
12514        public static final com.google.protobuf.Descriptors.Descriptor
12515            getDescriptor() {
12516          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_NewEpochResponseProto_descriptor;
12517        }
12518    
12519        protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
12520            internalGetFieldAccessorTable() {
12521          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_NewEpochResponseProto_fieldAccessorTable
12522              .ensureFieldAccessorsInitialized(
12523                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto.Builder.class);
12524        }
12525    
12526        public static com.google.protobuf.Parser<NewEpochResponseProto> PARSER =
12527            new com.google.protobuf.AbstractParser<NewEpochResponseProto>() {
12528          public NewEpochResponseProto parsePartialFrom(
12529              com.google.protobuf.CodedInputStream input,
12530              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
12531              throws com.google.protobuf.InvalidProtocolBufferException {
12532            return new NewEpochResponseProto(input, extensionRegistry);
12533          }
12534        };
12535    
12536        @java.lang.Override
12537        public com.google.protobuf.Parser<NewEpochResponseProto> getParserForType() {
12538          return PARSER;
12539        }
12540    
12541        private int bitField0_;
12542        // optional uint64 lastSegmentTxId = 1;
12543        public static final int LASTSEGMENTTXID_FIELD_NUMBER = 1;
12544        private long lastSegmentTxId_;
12545        /**
12546         * <code>optional uint64 lastSegmentTxId = 1;</code>
12547         */
12548        public boolean hasLastSegmentTxId() {
12549          return ((bitField0_ & 0x00000001) == 0x00000001);
12550        }
12551        /**
12552         * <code>optional uint64 lastSegmentTxId = 1;</code>
12553         */
12554        public long getLastSegmentTxId() {
12555          return lastSegmentTxId_;
12556        }
12557    
12558        private void initFields() {
12559          lastSegmentTxId_ = 0L;
12560        }
12561        private byte memoizedIsInitialized = -1;
12562        public final boolean isInitialized() {
12563          byte isInitialized = memoizedIsInitialized;
12564          if (isInitialized != -1) return isInitialized == 1;
12565    
12566          memoizedIsInitialized = 1;
12567          return true;
12568        }
12569    
12570        public void writeTo(com.google.protobuf.CodedOutputStream output)
12571                            throws java.io.IOException {
12572          getSerializedSize();
12573          if (((bitField0_ & 0x00000001) == 0x00000001)) {
12574            output.writeUInt64(1, lastSegmentTxId_);
12575          }
12576          getUnknownFields().writeTo(output);
12577        }
12578    
12579        private int memoizedSerializedSize = -1;
12580        public int getSerializedSize() {
12581          int size = memoizedSerializedSize;
12582          if (size != -1) return size;
12583    
12584          size = 0;
12585          if (((bitField0_ & 0x00000001) == 0x00000001)) {
12586            size += com.google.protobuf.CodedOutputStream
12587              .computeUInt64Size(1, lastSegmentTxId_);
12588          }
12589          size += getUnknownFields().getSerializedSize();
12590          memoizedSerializedSize = size;
12591          return size;
12592        }
12593    
12594        private static final long serialVersionUID = 0L;
12595        @java.lang.Override
12596        protected java.lang.Object writeReplace()
12597            throws java.io.ObjectStreamException {
12598          return super.writeReplace();
12599        }
12600    
12601        @java.lang.Override
12602        public boolean equals(final java.lang.Object obj) {
12603          if (obj == this) {
12604            return true;
12605          }
12606          if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto)) {
12607            return super.equals(obj);
12608          }
12609          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto) obj;
12610    
12611          boolean result = true;
12612          result = result && (hasLastSegmentTxId() == other.hasLastSegmentTxId());
12613          if (hasLastSegmentTxId()) {
12614            result = result && (getLastSegmentTxId()
12615                == other.getLastSegmentTxId());
12616          }
12617          result = result &&
12618              getUnknownFields().equals(other.getUnknownFields());
12619          return result;
12620        }
12621    
12622        private int memoizedHashCode = 0;
12623        @java.lang.Override
12624        public int hashCode() {
12625          if (memoizedHashCode != 0) {
12626            return memoizedHashCode;
12627          }
12628          int hash = 41;
12629          hash = (19 * hash) + getDescriptorForType().hashCode();
12630          if (hasLastSegmentTxId()) {
12631            hash = (37 * hash) + LASTSEGMENTTXID_FIELD_NUMBER;
12632            hash = (53 * hash) + hashLong(getLastSegmentTxId());
12633          }
12634          hash = (29 * hash) + getUnknownFields().hashCode();
12635          memoizedHashCode = hash;
12636          return hash;
12637        }
12638    
12639        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto parseFrom(
12640            com.google.protobuf.ByteString data)
12641            throws com.google.protobuf.InvalidProtocolBufferException {
12642          return PARSER.parseFrom(data);
12643        }
12644        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto parseFrom(
12645            com.google.protobuf.ByteString data,
12646            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
12647            throws com.google.protobuf.InvalidProtocolBufferException {
12648          return PARSER.parseFrom(data, extensionRegistry);
12649        }
12650        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto parseFrom(byte[] data)
12651            throws com.google.protobuf.InvalidProtocolBufferException {
12652          return PARSER.parseFrom(data);
12653        }
12654        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto parseFrom(
12655            byte[] data,
12656            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
12657            throws com.google.protobuf.InvalidProtocolBufferException {
12658          return PARSER.parseFrom(data, extensionRegistry);
12659        }
12660        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto parseFrom(java.io.InputStream input)
12661            throws java.io.IOException {
12662          return PARSER.parseFrom(input);
12663        }
12664        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto parseFrom(
12665            java.io.InputStream input,
12666            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
12667            throws java.io.IOException {
12668          return PARSER.parseFrom(input, extensionRegistry);
12669        }
12670        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto parseDelimitedFrom(java.io.InputStream input)
12671            throws java.io.IOException {
12672          return PARSER.parseDelimitedFrom(input);
12673        }
12674        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto parseDelimitedFrom(
12675            java.io.InputStream input,
12676            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
12677            throws java.io.IOException {
12678          return PARSER.parseDelimitedFrom(input, extensionRegistry);
12679        }
12680        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto parseFrom(
12681            com.google.protobuf.CodedInputStream input)
12682            throws java.io.IOException {
12683          return PARSER.parseFrom(input);
12684        }
12685        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto parseFrom(
12686            com.google.protobuf.CodedInputStream input,
12687            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
12688            throws java.io.IOException {
12689          return PARSER.parseFrom(input, extensionRegistry);
12690        }
12691    
12692        public static Builder newBuilder() { return Builder.create(); }
12693        public Builder newBuilderForType() { return newBuilder(); }
12694        public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto prototype) {
12695          return newBuilder().mergeFrom(prototype);
12696        }
12697        public Builder toBuilder() { return newBuilder(this); }
12698    
12699        @java.lang.Override
12700        protected Builder newBuilderForType(
12701            com.google.protobuf.GeneratedMessage.BuilderParent parent) {
12702          Builder builder = new Builder(parent);
12703          return builder;
12704        }
12705        /**
12706         * Protobuf type {@code hadoop.hdfs.NewEpochResponseProto}
12707         */
12708        public static final class Builder extends
12709            com.google.protobuf.GeneratedMessage.Builder<Builder>
12710           implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProtoOrBuilder {
12711          public static final com.google.protobuf.Descriptors.Descriptor
12712              getDescriptor() {
12713            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_NewEpochResponseProto_descriptor;
12714          }
12715    
12716          protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
12717              internalGetFieldAccessorTable() {
12718            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_NewEpochResponseProto_fieldAccessorTable
12719                .ensureFieldAccessorsInitialized(
12720                    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto.Builder.class);
12721          }
12722    
12723          // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto.newBuilder()
12724          private Builder() {
12725            maybeForceBuilderInitialization();
12726          }
12727    
12728          private Builder(
12729              com.google.protobuf.GeneratedMessage.BuilderParent parent) {
12730            super(parent);
12731            maybeForceBuilderInitialization();
12732          }
12733          private void maybeForceBuilderInitialization() {
12734            if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
12735            }
12736          }
12737          private static Builder create() {
12738            return new Builder();
12739          }
12740    
12741          public Builder clear() {
12742            super.clear();
12743            lastSegmentTxId_ = 0L;
12744            bitField0_ = (bitField0_ & ~0x00000001);
12745            return this;
12746          }
12747    
12748          public Builder clone() {
12749            return create().mergeFrom(buildPartial());
12750          }
12751    
12752          public com.google.protobuf.Descriptors.Descriptor
12753              getDescriptorForType() {
12754            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_NewEpochResponseProto_descriptor;
12755          }
12756    
12757          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto getDefaultInstanceForType() {
12758            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto.getDefaultInstance();
12759          }
12760    
12761          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto build() {
12762            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto result = buildPartial();
12763            if (!result.isInitialized()) {
12764              throw newUninitializedMessageException(result);
12765            }
12766            return result;
12767          }
12768    
12769          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto buildPartial() {
12770            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto(this);
12771            int from_bitField0_ = bitField0_;
12772            int to_bitField0_ = 0;
12773            if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
12774              to_bitField0_ |= 0x00000001;
12775            }
12776            result.lastSegmentTxId_ = lastSegmentTxId_;
12777            result.bitField0_ = to_bitField0_;
12778            onBuilt();
12779            return result;
12780          }
12781    
12782          public Builder mergeFrom(com.google.protobuf.Message other) {
12783            if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto) {
12784              return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto)other);
12785            } else {
12786              super.mergeFrom(other);
12787              return this;
12788            }
12789          }
12790    
12791          public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto other) {
12792            if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto.getDefaultInstance()) return this;
12793            if (other.hasLastSegmentTxId()) {
12794              setLastSegmentTxId(other.getLastSegmentTxId());
12795            }
12796            this.mergeUnknownFields(other.getUnknownFields());
12797            return this;
12798          }
12799    
12800          public final boolean isInitialized() {
12801            return true;
12802          }
12803    
12804          public Builder mergeFrom(
12805              com.google.protobuf.CodedInputStream input,
12806              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
12807              throws java.io.IOException {
12808            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto parsedMessage = null;
12809            try {
12810              parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
12811            } catch (com.google.protobuf.InvalidProtocolBufferException e) {
12812              parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto) e.getUnfinishedMessage();
12813              throw e;
12814            } finally {
12815              if (parsedMessage != null) {
12816                mergeFrom(parsedMessage);
12817              }
12818            }
12819            return this;
12820          }
12821          private int bitField0_;
12822    
12823          // optional uint64 lastSegmentTxId = 1;
12824          private long lastSegmentTxId_;
12825          /**
12826           * <code>optional uint64 lastSegmentTxId = 1;</code>
12827           */
12828          public boolean hasLastSegmentTxId() {
12829            return ((bitField0_ & 0x00000001) == 0x00000001);
12830          }
12831          /**
12832           * <code>optional uint64 lastSegmentTxId = 1;</code>
12833           */
12834          public long getLastSegmentTxId() {
12835            return lastSegmentTxId_;
12836          }
12837          /**
12838           * <code>optional uint64 lastSegmentTxId = 1;</code>
12839           */
12840          public Builder setLastSegmentTxId(long value) {
12841            bitField0_ |= 0x00000001;
12842            lastSegmentTxId_ = value;
12843            onChanged();
12844            return this;
12845          }
12846          /**
12847           * <code>optional uint64 lastSegmentTxId = 1;</code>
12848           */
12849          public Builder clearLastSegmentTxId() {
12850            bitField0_ = (bitField0_ & ~0x00000001);
12851            lastSegmentTxId_ = 0L;
12852            onChanged();
12853            return this;
12854          }
12855    
12856          // @@protoc_insertion_point(builder_scope:hadoop.hdfs.NewEpochResponseProto)
12857        }
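
         // Illustrative usage sketch (added by hand, not protoc output): round-trips
         // a NewEpochResponseProto through its wire encoding, using only API shown
         // above (the Builder, toByteArray() inherited from GeneratedMessage, and
         // the static parseFrom). The txid is a made-up placeholder; since the lone
         // field is optional, build() cannot fail the required-field check.
         private static NewEpochResponseProto exampleRoundTrip()
             throws com.google.protobuf.InvalidProtocolBufferException {
           NewEpochResponseProto original = newBuilder()
               .setLastSegmentTxId(42L)          // optional uint64 lastSegmentTxId = 1
               .build();
           byte[] wire = original.toByteArray(); // serialize via writeTo/getSerializedSize
           return parseFrom(wire);               // delegates to PARSER.parseFrom(data)
         }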
12858    
12859        static {
12860          defaultInstance = new NewEpochResponseProto(true);
12861          defaultInstance.initFields();
12862        }
12863    
12864        // @@protoc_insertion_point(class_scope:hadoop.hdfs.NewEpochResponseProto)
12865      }
12866    
12867      public interface GetEditLogManifestRequestProtoOrBuilder
12868          extends com.google.protobuf.MessageOrBuilder {
12869    
12870        // required .hadoop.hdfs.JournalIdProto jid = 1;
12871        /**
12872         * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
12873         */
12874        boolean hasJid();
12875        /**
12876         * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
12877         */
12878        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid();
12879        /**
12880         * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
12881         */
12882        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder();
12883    
12884        // required uint64 sinceTxId = 2;
12885        /**
12886         * <code>required uint64 sinceTxId = 2;</code>
12887         *
12888         * <pre>
12889         * Transaction ID
12890         * </pre>
12891         */
12892        boolean hasSinceTxId();
12893        /**
12894         * <code>required uint64 sinceTxId = 2;</code>
12895         *
12896         * <pre>
12897         * Transaction ID
12898         * </pre>
12899         */
12900        long getSinceTxId();
12901    
12902        // optional bool inProgressOk = 4 [default = false];
12903        /**
12904         * <code>optional bool inProgressOk = 4 [default = false];</code>
12905         *
12906         * <pre>
12907         * Whether or not the client will be reading from the returned streams.
12908         * optional bool forReading = 3 [default = true]; &lt;obsolete, do not reuse&gt;
12909         * </pre>
12910         */
12911        boolean hasInProgressOk();
12912        /**
12913         * <code>optional bool inProgressOk = 4 [default = false];</code>
12914         *
12915         * <pre>
12916         * Whether or not the client will be reading from the returned streams.
12917         * optional bool forReading = 3 [default = true]; &lt;obsolete, do not reuse&gt;
12918         * </pre>
12919         */
12920        boolean getInProgressOk();
12921      }
12922      /**
12923       * Protobuf type {@code hadoop.hdfs.GetEditLogManifestRequestProto}
12924       *
12925       * <pre>
12927       * getEditLogManifest()
12928       * </pre>
12929       */
12930      public static final class GetEditLogManifestRequestProto extends
12931          com.google.protobuf.GeneratedMessage
12932          implements GetEditLogManifestRequestProtoOrBuilder {
12933        // Use GetEditLogManifestRequestProto.newBuilder() to construct.
12934        private GetEditLogManifestRequestProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
12935          super(builder);
12936          this.unknownFields = builder.getUnknownFields();
12937        }
12938        private GetEditLogManifestRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
12939    
12940        private static final GetEditLogManifestRequestProto defaultInstance;
12941        public static GetEditLogManifestRequestProto getDefaultInstance() {
12942          return defaultInstance;
12943        }
12944    
12945        public GetEditLogManifestRequestProto getDefaultInstanceForType() {
12946          return defaultInstance;
12947        }
12948    
12949        private final com.google.protobuf.UnknownFieldSet unknownFields;
12950        @java.lang.Override
12951        public final com.google.protobuf.UnknownFieldSet
12952            getUnknownFields() {
12953          return this.unknownFields;
12954        }
12955        private GetEditLogManifestRequestProto(
12956            com.google.protobuf.CodedInputStream input,
12957            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
12958            throws com.google.protobuf.InvalidProtocolBufferException {
12959          initFields();
12960          int mutable_bitField0_ = 0;
12961          com.google.protobuf.UnknownFieldSet.Builder unknownFields =
12962              com.google.protobuf.UnknownFieldSet.newBuilder();
12963          try {
12964            boolean done = false;
12965            while (!done) {
12966              int tag = input.readTag();
12967              switch (tag) {
12968                case 0:
12969                  done = true;
12970                  break;
12971                default: {
12972                  if (!parseUnknownField(input, unknownFields,
12973                                         extensionRegistry, tag)) {
12974                    done = true;
12975                  }
12976                  break;
12977                }
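                     // Tags are (fieldNumber << 3) | wireType: 10 -> field 1
                     // (length-delimited message), 16 -> field 2 (varint),
                     // 32 -> field 4 (varint).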
12978                case 10: {
12979                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder subBuilder = null;
12980                  if (((bitField0_ & 0x00000001) == 0x00000001)) {
12981                    subBuilder = jid_.toBuilder();
12982                  }
12983                  jid_ = input.readMessage(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.PARSER, extensionRegistry);
12984                  if (subBuilder != null) {
12985                    subBuilder.mergeFrom(jid_);
12986                    jid_ = subBuilder.buildPartial();
12987                  }
12988                  bitField0_ |= 0x00000001;
12989                  break;
12990                }
12991                case 16: {
12992                  bitField0_ |= 0x00000002;
12993                  sinceTxId_ = input.readUInt64();
12994                  break;
12995                }
12996                case 32: {
12997                  bitField0_ |= 0x00000004;
12998                  inProgressOk_ = input.readBool();
12999                  break;
13000                }
13001              }
13002            }
13003          } catch (com.google.protobuf.InvalidProtocolBufferException e) {
13004            throw e.setUnfinishedMessage(this);
13005          } catch (java.io.IOException e) {
13006            throw new com.google.protobuf.InvalidProtocolBufferException(
13007                e.getMessage()).setUnfinishedMessage(this);
13008          } finally {
13009            this.unknownFields = unknownFields.build();
13010            makeExtensionsImmutable();
13011          }
13012        }
13013        public static final com.google.protobuf.Descriptors.Descriptor
13014            getDescriptor() {
13015          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_GetEditLogManifestRequestProto_descriptor;
13016        }
13017    
13018        protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
13019            internalGetFieldAccessorTable() {
13020          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_GetEditLogManifestRequestProto_fieldAccessorTable
13021              .ensureFieldAccessorsInitialized(
13022                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto.Builder.class);
13023        }
13024    
13025        public static com.google.protobuf.Parser<GetEditLogManifestRequestProto> PARSER =
13026            new com.google.protobuf.AbstractParser<GetEditLogManifestRequestProto>() {
13027          public GetEditLogManifestRequestProto parsePartialFrom(
13028              com.google.protobuf.CodedInputStream input,
13029              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
13030              throws com.google.protobuf.InvalidProtocolBufferException {
13031            return new GetEditLogManifestRequestProto(input, extensionRegistry);
13032          }
13033        };
13034    
13035        @java.lang.Override
13036        public com.google.protobuf.Parser<GetEditLogManifestRequestProto> getParserForType() {
13037          return PARSER;
13038        }
13039    
13040        private int bitField0_;
13041        // required .hadoop.hdfs.JournalIdProto jid = 1;
13042        public static final int JID_FIELD_NUMBER = 1;
13043        private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto jid_;
13044        /**
13045         * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
13046         */
13047        public boolean hasJid() {
13048          return ((bitField0_ & 0x00000001) == 0x00000001);
13049        }
13050        /**
13051         * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
13052         */
13053        public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid() {
13054          return jid_;
13055        }
13056        /**
13057         * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
13058         */
13059        public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder() {
13060          return jid_;
13061        }
13062    
13063        // required uint64 sinceTxId = 2;
13064        public static final int SINCETXID_FIELD_NUMBER = 2;
13065        private long sinceTxId_;
13066        /**
13067         * <code>required uint64 sinceTxId = 2;</code>
13068         *
13069         * <pre>
13070         * Transaction ID
13071         * </pre>
13072         */
13073        public boolean hasSinceTxId() {
13074          return ((bitField0_ & 0x00000002) == 0x00000002);
13075        }
13076        /**
13077         * <code>required uint64 sinceTxId = 2;</code>
13078         *
13079         * <pre>
13080         * Transaction ID
13081         * </pre>
13082         */
13083        public long getSinceTxId() {
13084          return sinceTxId_;
13085        }
13086    
13087        // optional bool inProgressOk = 4 [default = false];
13088        public static final int INPROGRESSOK_FIELD_NUMBER = 4;
13089        private boolean inProgressOk_;
13090        /**
13091         * <code>optional bool inProgressOk = 4 [default = false];</code>
13092         *
13093         * <pre>
13094         * Whether or not the client will be reading from the returned streams.
13095         * optional bool forReading = 3 [default = true]; &lt;obsolete, do not reuse&gt;
13096         * </pre>
13097         */
13098        public boolean hasInProgressOk() {
13099          return ((bitField0_ & 0x00000004) == 0x00000004);
13100        }
13101        /**
13102         * <code>optional bool inProgressOk = 4 [default = false];</code>
13103         *
13104         * <pre>
13105         * Whether or not the client will be reading from the returned streams.
13106         * optional bool forReading = 3 [default = true]; &lt;obsolete, do not reuse&gt;
13107         * </pre>
13108         */
13109        public boolean getInProgressOk() {
13110          return inProgressOk_;
13111        }
13112    
13113        private void initFields() {
13114          jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
13115          sinceTxId_ = 0L;
13116          inProgressOk_ = false;
13117        }
13118        private byte memoizedIsInitialized = -1;
13119        public final boolean isInitialized() {
13120          byte isInitialized = memoizedIsInitialized;
13121          if (isInitialized != -1) return isInitialized == 1;
13122    
13123          if (!hasJid()) {
13124            memoizedIsInitialized = 0;
13125            return false;
13126          }
13127          if (!hasSinceTxId()) {
13128            memoizedIsInitialized = 0;
13129            return false;
13130          }
13131          if (!getJid().isInitialized()) {
13132            memoizedIsInitialized = 0;
13133            return false;
13134          }
13135          memoizedIsInitialized = 1;
13136          return true;
13137        }
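
         // Illustrative usage sketch (added by hand, not protoc output): builds a
         // manifest request. The setters follow the standard generated-Builder
         // pattern for the fields declared above; the identifier and txid values
         // are made-up placeholders. All required fields are set, so build()
         // passes the isInitialized() check.
         private static GetEditLogManifestRequestProto exampleRequest() {
           return newBuilder()
               .setJid(JournalIdProto.newBuilder()
                   .setIdentifier("edits")   // required string identifier = 1
                   .build())
               .setSinceTxId(100L)           // required uint64 sinceTxId = 2
               .setInProgressOk(true)        // optional bool inProgressOk = 4 [default = false]
               .build();
         }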
13138    
13139        public void writeTo(com.google.protobuf.CodedOutputStream output)
13140                            throws java.io.IOException {
13141          getSerializedSize();
13142          if (((bitField0_ & 0x00000001) == 0x00000001)) {
13143            output.writeMessage(1, jid_);
13144          }
13145          if (((bitField0_ & 0x00000002) == 0x00000002)) {
13146            output.writeUInt64(2, sinceTxId_);
13147          }
13148          if (((bitField0_ & 0x00000004) == 0x00000004)) {
13149            output.writeBool(4, inProgressOk_);
13150          }
13151          getUnknownFields().writeTo(output);
13152        }
13153    
13154        private int memoizedSerializedSize = -1;
13155        public int getSerializedSize() {
13156          int size = memoizedSerializedSize;
13157          if (size != -1) return size;
13158    
13159          size = 0;
13160          if (((bitField0_ & 0x00000001) == 0x00000001)) {
13161            size += com.google.protobuf.CodedOutputStream
13162              .computeMessageSize(1, jid_);
13163          }
13164          if (((bitField0_ & 0x00000002) == 0x00000002)) {
13165            size += com.google.protobuf.CodedOutputStream
13166              .computeUInt64Size(2, sinceTxId_);
13167          }
13168          if (((bitField0_ & 0x00000004) == 0x00000004)) {
13169            size += com.google.protobuf.CodedOutputStream
13170              .computeBoolSize(4, inProgressOk_);
13171          }
13172          size += getUnknownFields().getSerializedSize();
13173          memoizedSerializedSize = size;
13174          return size;
13175        }
13176    
13177        private static final long serialVersionUID = 0L;
13178        @java.lang.Override
13179        protected java.lang.Object writeReplace()
13180            throws java.io.ObjectStreamException {
13181          return super.writeReplace();
13182        }
13183    
13184        @java.lang.Override
13185        public boolean equals(final java.lang.Object obj) {
13186          if (obj == this) {
13187            return true;
13188          }
13189          if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto)) {
13190            return super.equals(obj);
13191          }
13192          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto) obj;
13193    
13194          boolean result = true;
13195          result = result && (hasJid() == other.hasJid());
13196          if (hasJid()) {
13197            result = result && getJid()
13198                .equals(other.getJid());
13199          }
13200          result = result && (hasSinceTxId() == other.hasSinceTxId());
13201          if (hasSinceTxId()) {
13202            result = result && (getSinceTxId()
13203                == other.getSinceTxId());
13204          }
13205          result = result && (hasInProgressOk() == other.hasInProgressOk());
13206          if (hasInProgressOk()) {
13207            result = result && (getInProgressOk()
13208                == other.getInProgressOk());
13209          }
13210          result = result &&
13211              getUnknownFields().equals(other.getUnknownFields());
13212          return result;
13213        }
13214    
13215        private int memoizedHashCode = 0;
13216        @java.lang.Override
13217        public int hashCode() {
13218          if (memoizedHashCode != 0) {
13219            return memoizedHashCode;
13220          }
13221          int hash = 41;
13222          hash = (19 * hash) + getDescriptorForType().hashCode();
13223          if (hasJid()) {
13224            hash = (37 * hash) + JID_FIELD_NUMBER;
13225            hash = (53 * hash) + getJid().hashCode();
13226          }
13227          if (hasSinceTxId()) {
13228            hash = (37 * hash) + SINCETXID_FIELD_NUMBER;
13229            hash = (53 * hash) + hashLong(getSinceTxId());
13230          }
13231          if (hasInProgressOk()) {
13232            hash = (37 * hash) + INPROGRESSOK_FIELD_NUMBER;
13233            hash = (53 * hash) + hashBoolean(getInProgressOk());
13234          }
13235          hash = (29 * hash) + getUnknownFields().hashCode();
13236          memoizedHashCode = hash;
13237          return hash;
13238        }
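    // memoizedHashCode uses 0 as the "not yet computed" sentinel. The
    // unsynchronized cache is a benign race: recomputation is deterministic
    // and a 32-bit int write is atomic in Java, so concurrent callers at
    // worst recompute the same value.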
13239    
13240        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto parseFrom(
13241            com.google.protobuf.ByteString data)
13242            throws com.google.protobuf.InvalidProtocolBufferException {
13243          return PARSER.parseFrom(data);
13244        }
13245        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto parseFrom(
13246            com.google.protobuf.ByteString data,
13247            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
13248            throws com.google.protobuf.InvalidProtocolBufferException {
13249          return PARSER.parseFrom(data, extensionRegistry);
13250        }
13251        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto parseFrom(byte[] data)
13252            throws com.google.protobuf.InvalidProtocolBufferException {
13253          return PARSER.parseFrom(data);
13254        }
13255        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto parseFrom(
13256            byte[] data,
13257            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
13258            throws com.google.protobuf.InvalidProtocolBufferException {
13259          return PARSER.parseFrom(data, extensionRegistry);
13260        }
13261        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto parseFrom(java.io.InputStream input)
13262            throws java.io.IOException {
13263          return PARSER.parseFrom(input);
13264        }
13265        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto parseFrom(
13266            java.io.InputStream input,
13267            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
13268            throws java.io.IOException {
13269          return PARSER.parseFrom(input, extensionRegistry);
13270        }
13271        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto parseDelimitedFrom(java.io.InputStream input)
13272            throws java.io.IOException {
13273          return PARSER.parseDelimitedFrom(input);
13274        }
13275        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto parseDelimitedFrom(
13276            java.io.InputStream input,
13277            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
13278            throws java.io.IOException {
13279          return PARSER.parseDelimitedFrom(input, extensionRegistry);
13280        }
13281        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto parseFrom(
13282            com.google.protobuf.CodedInputStream input)
13283            throws java.io.IOException {
13284          return PARSER.parseFrom(input);
13285        }
13286        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto parseFrom(
13287            com.google.protobuf.CodedInputStream input,
13288            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
13289            throws java.io.IOException {
13290          return PARSER.parseFrom(input, extensionRegistry);
13291        }
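    // Usage sketch (illustrative, not part of the generated file): every
    // parseFrom overload above validates required fields, so input that lacks
    // jid or sinceTxId fails with InvalidProtocolBufferException. "data" is a
    // hypothetical serialized byte[]:
    //
    //   GetEditLogManifestRequestProto req =
    //       GetEditLogManifestRequestProto.parseFrom(data);
    //   long since = req.getSinceTxId();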
13292    
13293        public static Builder newBuilder() { return Builder.create(); }
13294        public Builder newBuilderForType() { return newBuilder(); }
13295        public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto prototype) {
13296          return newBuilder().mergeFrom(prototype);
13297        }
13298        public Builder toBuilder() { return newBuilder(this); }
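    // Builder usage sketch (illustrative; the journal identifier is a
    // hypothetical value). build() throws an UninitializedMessageException if
    // either required field, jid or sinceTxId, is left unset:
    //
    //   GetEditLogManifestRequestProto req =
    //       GetEditLogManifestRequestProto.newBuilder()
    //           .setJid(JournalIdProto.newBuilder().setIdentifier("myjournal"))
    //           .setSinceTxId(1L)
    //           .setInProgressOk(true)  // optional; defaults to false
    //           .build();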
13299    
13300        @java.lang.Override
13301        protected Builder newBuilderForType(
13302            com.google.protobuf.GeneratedMessage.BuilderParent parent) {
13303          Builder builder = new Builder(parent);
13304          return builder;
13305        }
13306        /**
13307         * Protobuf type {@code hadoop.hdfs.GetEditLogManifestRequestProto}
13308         *
13309         * <pre>
13311         * getEditLogManifest()
13312         * </pre>
13313         */
13314        public static final class Builder extends
13315            com.google.protobuf.GeneratedMessage.Builder<Builder>
13316           implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProtoOrBuilder {
13317          public static final com.google.protobuf.Descriptors.Descriptor
13318              getDescriptor() {
13319            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_GetEditLogManifestRequestProto_descriptor;
13320          }
13321    
13322          protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
13323              internalGetFieldAccessorTable() {
13324            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_GetEditLogManifestRequestProto_fieldAccessorTable
13325                .ensureFieldAccessorsInitialized(
13326                    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto.Builder.class);
13327          }
13328    
13329          // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto.newBuilder()
13330          private Builder() {
13331            maybeForceBuilderInitialization();
13332          }
13333    
13334          private Builder(
13335              com.google.protobuf.GeneratedMessage.BuilderParent parent) {
13336            super(parent);
13337            maybeForceBuilderInitialization();
13338          }
13339          private void maybeForceBuilderInitialization() {
13340            if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
13341              getJidFieldBuilder();
13342            }
13343          }
13344          private static Builder create() {
13345            return new Builder();
13346          }
13347    
13348          public Builder clear() {
13349            super.clear();
13350            if (jidBuilder_ == null) {
13351              jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
13352            } else {
13353              jidBuilder_.clear();
13354            }
13355            bitField0_ = (bitField0_ & ~0x00000001);
13356            sinceTxId_ = 0L;
13357            bitField0_ = (bitField0_ & ~0x00000002);
13358            inProgressOk_ = false;
13359            bitField0_ = (bitField0_ & ~0x00000004);
13360            return this;
13361          }
13362    
13363          public Builder clone() {
13364            return create().mergeFrom(buildPartial());
13365          }
13366    
13367          public com.google.protobuf.Descriptors.Descriptor
13368              getDescriptorForType() {
13369            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_GetEditLogManifestRequestProto_descriptor;
13370          }
13371    
13372          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto getDefaultInstanceForType() {
13373            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto.getDefaultInstance();
13374          }
13375    
13376          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto build() {
13377            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto result = buildPartial();
13378            if (!result.isInitialized()) {
13379              throw newUninitializedMessageException(result);
13380            }
13381            return result;
13382          }
13383    
13384          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto buildPartial() {
13385            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto(this);
13386            int from_bitField0_ = bitField0_;
13387            int to_bitField0_ = 0;
13388            if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
13389              to_bitField0_ |= 0x00000001;
13390            }
13391            if (jidBuilder_ == null) {
13392              result.jid_ = jid_;
13393            } else {
13394              result.jid_ = jidBuilder_.build();
13395            }
13396            if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
13397              to_bitField0_ |= 0x00000002;
13398            }
13399            result.sinceTxId_ = sinceTxId_;
13400            if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
13401              to_bitField0_ |= 0x00000004;
13402            }
13403            result.inProgressOk_ = inProgressOk_;
13404            result.bitField0_ = to_bitField0_;
13405            onBuilt();
13406            return result;
13407          }
13408    
13409          public Builder mergeFrom(com.google.protobuf.Message other) {
13410            if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto) {
13411              return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto)other);
13412            } else {
13413              super.mergeFrom(other);
13414              return this;
13415            }
13416          }
13417    
13418          public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto other) {
13419            if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto.getDefaultInstance()) return this;
13420            if (other.hasJid()) {
13421              mergeJid(other.getJid());
13422            }
13423            if (other.hasSinceTxId()) {
13424              setSinceTxId(other.getSinceTxId());
13425            }
13426            if (other.hasInProgressOk()) {
13427              setInProgressOk(other.getInProgressOk());
13428            }
13429            this.mergeUnknownFields(other.getUnknownFields());
13430            return this;
13431          }
13432    
13433          public final boolean isInitialized() {
13434            if (!hasJid()) {
13435              
13436              return false;
13437            }
13438            if (!hasSinceTxId()) {
13439              
13440              return false;
13441            }
13442            if (!getJid().isInitialized()) {
13443              
13444              return false;
13445            }
13446            return true;
13447          }
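      // Mirrors the message-level check: the required jid and sinceTxId must
      // be present, and the nested jid message must itself be initialized.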
13448    
13449          public Builder mergeFrom(
13450              com.google.protobuf.CodedInputStream input,
13451              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
13452              throws java.io.IOException {
13453            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto parsedMessage = null;
13454            try {
13455              parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
13456            } catch (com.google.protobuf.InvalidProtocolBufferException e) {
13457              parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto) e.getUnfinishedMessage();
13458              throw e;
13459            } finally {
13460              if (parsedMessage != null) {
13461                mergeFrom(parsedMessage);
13462              }
13463            }
13464            return this;
13465          }
13466          private int bitField0_;
13467    
13468          // required .hadoop.hdfs.JournalIdProto jid = 1;
13469          private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
13470          private com.google.protobuf.SingleFieldBuilder<
13471              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder> jidBuilder_;
13472          /**
13473           * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
13474           */
13475          public boolean hasJid() {
13476            return ((bitField0_ & 0x00000001) == 0x00000001);
13477          }
13478          /**
13479           * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
13480           */
13481          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid() {
13482            if (jidBuilder_ == null) {
13483              return jid_;
13484            } else {
13485              return jidBuilder_.getMessage();
13486            }
13487          }
13488          /**
13489           * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
13490           */
13491          public Builder setJid(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto value) {
13492            if (jidBuilder_ == null) {
13493              if (value == null) {
13494                throw new NullPointerException();
13495              }
13496              jid_ = value;
13497              onChanged();
13498            } else {
13499              jidBuilder_.setMessage(value);
13500            }
13501            bitField0_ |= 0x00000001;
13502            return this;
13503          }
13504          /**
13505           * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
13506           */
13507          public Builder setJid(
13508              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder builderForValue) {
13509            if (jidBuilder_ == null) {
13510              jid_ = builderForValue.build();
13511              onChanged();
13512            } else {
13513              jidBuilder_.setMessage(builderForValue.build());
13514            }
13515            bitField0_ |= 0x00000001;
13516            return this;
13517          }
13518          /**
13519           * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
13520           */
13521          public Builder mergeJid(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto value) {
13522            if (jidBuilder_ == null) {
13523              if (((bitField0_ & 0x00000001) == 0x00000001) &&
13524                  jid_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance()) {
13525                jid_ =
13526                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.newBuilder(jid_).mergeFrom(value).buildPartial();
13527              } else {
13528                jid_ = value;
13529              }
13530              onChanged();
13531            } else {
13532              jidBuilder_.mergeFrom(value);
13533            }
13534            bitField0_ |= 0x00000001;
13535            return this;
13536          }
13537          /**
13538           * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
13539           */
13540          public Builder clearJid() {
13541            if (jidBuilder_ == null) {
13542              jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
13543              onChanged();
13544            } else {
13545              jidBuilder_.clear();
13546            }
13547            bitField0_ = (bitField0_ & ~0x00000001);
13548            return this;
13549          }
13550          /**
13551           * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
13552           */
13553          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder getJidBuilder() {
13554            bitField0_ |= 0x00000001;
13555            onChanged();
13556            return getJidFieldBuilder().getBuilder();
13557          }
13558          /**
13559           * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
13560           */
13561          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder() {
13562            if (jidBuilder_ != null) {
13563              return jidBuilder_.getMessageOrBuilder();
13564            } else {
13565              return jid_;
13566            }
13567          }
13568          /**
13569           * <code>required .hadoop.hdfs.JournalIdProto jid = 1;</code>
13570           */
13571          private com.google.protobuf.SingleFieldBuilder<
13572              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder> 
13573              getJidFieldBuilder() {
13574            if (jidBuilder_ == null) {
13575              jidBuilder_ = new com.google.protobuf.SingleFieldBuilder<
13576                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder>(
13577                      jid_,
13578                      getParentForChildren(),
13579                      isClean());
13580              jid_ = null;
13581            }
13582            return jidBuilder_;
13583          }
13584    
13585          // required uint64 sinceTxId = 2;
13586          private long sinceTxId_ ;
13587          /**
13588           * <code>required uint64 sinceTxId = 2;</code>
13589           *
13590           * <pre>
13591           * Transaction ID
13592           * </pre>
13593           */
13594          public boolean hasSinceTxId() {
13595            return ((bitField0_ & 0x00000002) == 0x00000002);
13596          }
13597          /**
13598           * <code>required uint64 sinceTxId = 2;</code>
13599           *
13600           * <pre>
13601           * Transaction ID
13602           * </pre>
13603           */
13604          public long getSinceTxId() {
13605            return sinceTxId_;
13606          }
13607          /**
13608           * <code>required uint64 sinceTxId = 2;</code>
13609           *
13610           * <pre>
13611           * Transaction ID
13612           * </pre>
13613           */
13614          public Builder setSinceTxId(long value) {
13615            bitField0_ |= 0x00000002;
13616            sinceTxId_ = value;
13617            onChanged();
13618            return this;
13619          }
13620          /**
13621           * <code>required uint64 sinceTxId = 2;</code>
13622           *
13623           * <pre>
13624           * Transaction ID
13625           * </pre>
13626           */
13627          public Builder clearSinceTxId() {
13628            bitField0_ = (bitField0_ & ~0x00000002);
13629            sinceTxId_ = 0L;
13630            onChanged();
13631            return this;
13632          }
13633    
13634          // optional bool inProgressOk = 4 [default = false];
13635          private boolean inProgressOk_ ;
13636          /**
13637           * <code>optional bool inProgressOk = 4 [default = false];</code>
13638           *
13639           * <pre>
13640           * Whether or not the client will be reading from the returned streams.
13641           * optional bool forReading = 3 [default = true]; &lt;obsolete, do not reuse&gt;
13642           * </pre>
13643           */
13644          public boolean hasInProgressOk() {
13645            return ((bitField0_ & 0x00000004) == 0x00000004);
13646          }
13647          /**
13648           * <code>optional bool inProgressOk = 4 [default = false];</code>
13649           *
13650           * <pre>
13651           * Whether or not the client will be reading from the returned streams.
13652           * optional bool forReading = 3 [default = true]; &lt;obsolete, do not reuse&gt;
13653           * </pre>
13654           */
13655          public boolean getInProgressOk() {
13656            return inProgressOk_;
13657          }
13658          /**
13659           * <code>optional bool inProgressOk = 4 [default = false];</code>
13660           *
13661           * <pre>
13662           * Whether or not the client will be reading from the returned streams.
13663           * optional bool forReading = 3 [default = true]; &lt;obsolete, do not reuse&gt;
13664           * </pre>
13665           */
13666          public Builder setInProgressOk(boolean value) {
13667            bitField0_ |= 0x00000004;
13668            inProgressOk_ = value;
13669            onChanged();
13670            return this;
13671          }
13672          /**
13673           * <code>optional bool inProgressOk = 4 [default = false];</code>
13674           *
13675           * <pre>
13676           * Whether or not the client will be reading from the returned streams.
13677           * optional bool forReading = 3 [default = true]; &lt;obsolete, do not reuse&gt;
13678           * </pre>
13679           */
13680          public Builder clearInProgressOk() {
13681            bitField0_ = (bitField0_ & ~0x00000004);
13682            inProgressOk_ = false;
13683            onChanged();
13684            return this;
13685          }
13686    
13687          // @@protoc_insertion_point(builder_scope:hadoop.hdfs.GetEditLogManifestRequestProto)
13688        }
13689    
13690        static {
13691          defaultInstance = new GetEditLogManifestRequestProto(true);
13692          defaultInstance.initFields();
13693        }
13694    
13695        // @@protoc_insertion_point(class_scope:hadoop.hdfs.GetEditLogManifestRequestProto)
13696      }
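  // Streaming sketch (illustrative): requests also support length-delimited
  // framing via writeDelimitedTo/parseDelimitedFrom, which is useful when
  // several messages share one stream. "out" and "in" are hypothetical
  // streams, and "req" a previously built request:
  //
  //   req.writeDelimitedTo(out);
  //   GetEditLogManifestRequestProto next =
  //       GetEditLogManifestRequestProto.parseDelimitedFrom(in);  // null at EOF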
13697    
13698      public interface GetEditLogManifestResponseProtoOrBuilder
13699          extends com.google.protobuf.MessageOrBuilder {
13700    
13701        // required .hadoop.hdfs.RemoteEditLogManifestProto manifest = 1;
13702        /**
13703         * <code>required .hadoop.hdfs.RemoteEditLogManifestProto manifest = 1;</code>
13704         */
13705        boolean hasManifest();
13706        /**
13707         * <code>required .hadoop.hdfs.RemoteEditLogManifestProto manifest = 1;</code>
13708         */
13709        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto getManifest();
13710        /**
13711         * <code>required .hadoop.hdfs.RemoteEditLogManifestProto manifest = 1;</code>
13712         */
13713        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProtoOrBuilder getManifestOrBuilder();
13714    
13715        // required uint32 httpPort = 2;
13716        /**
13717         * <code>required uint32 httpPort = 2;</code>
13718         *
13719         * <pre>
13720         * Deprecated by fromURL
13721         * </pre>
13722         */
13723        boolean hasHttpPort();
13724        /**
13725         * <code>required uint32 httpPort = 2;</code>
13726         *
13727         * <pre>
13728         * Deprecated by fromURL
13729         * </pre>
13730         */
13731        int getHttpPort();
13732    
13733        // optional string fromURL = 3;
13734        /**
13735         * <code>optional string fromURL = 3;</code>
13736         */
13737        boolean hasFromURL();
13738        /**
13739         * <code>optional string fromURL = 3;</code>
13740         */
13741        java.lang.String getFromURL();
13742        /**
13743         * <code>optional string fromURL = 3;</code>
13744         */
13745        com.google.protobuf.ByteString
13746            getFromURLBytes();
13747      }
13748      /**
13749       * Protobuf type {@code hadoop.hdfs.GetEditLogManifestResponseProto}
13750       */
13751      public static final class GetEditLogManifestResponseProto extends
13752          com.google.protobuf.GeneratedMessage
13753          implements GetEditLogManifestResponseProtoOrBuilder {
13754        // Use GetEditLogManifestResponseProto.newBuilder() to construct.
13755        private GetEditLogManifestResponseProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
13756          super(builder);
13757          this.unknownFields = builder.getUnknownFields();
13758        }
13759        private GetEditLogManifestResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
13760    
13761        private static final GetEditLogManifestResponseProto defaultInstance;
13762        public static GetEditLogManifestResponseProto getDefaultInstance() {
13763          return defaultInstance;
13764        }
13765    
13766        public GetEditLogManifestResponseProto getDefaultInstanceForType() {
13767          return defaultInstance;
13768        }
13769    
13770        private final com.google.protobuf.UnknownFieldSet unknownFields;
13771        @java.lang.Override
13772        public final com.google.protobuf.UnknownFieldSet
13773            getUnknownFields() {
13774          return this.unknownFields;
13775        }
13776        private GetEditLogManifestResponseProto(
13777            com.google.protobuf.CodedInputStream input,
13778            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
13779            throws com.google.protobuf.InvalidProtocolBufferException {
13780          initFields();
13781          int mutable_bitField0_ = 0;
13782          com.google.protobuf.UnknownFieldSet.Builder unknownFields =
13783              com.google.protobuf.UnknownFieldSet.newBuilder();
13784          try {
13785            boolean done = false;
13786            while (!done) {
13787              int tag = input.readTag();
13788              switch (tag) {
13789                case 0:
13790                  done = true;
13791                  break;
13792                default: {
13793                  if (!parseUnknownField(input, unknownFields,
13794                                         extensionRegistry, tag)) {
13795                    done = true;
13796                  }
13797                  break;
13798                }
13799                case 10: {
13800                  org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto.Builder subBuilder = null;
13801                  if (((bitField0_ & 0x00000001) == 0x00000001)) {
13802                    subBuilder = manifest_.toBuilder();
13803                  }
13804                  manifest_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto.PARSER, extensionRegistry);
13805                  if (subBuilder != null) {
13806                    subBuilder.mergeFrom(manifest_);
13807                    manifest_ = subBuilder.buildPartial();
13808                  }
13809                  bitField0_ |= 0x00000001;
13810                  break;
13811                }
13812                case 16: {
13813                  bitField0_ |= 0x00000002;
13814                  httpPort_ = input.readUInt32();
13815                  break;
13816                }
13817                case 26: {
13818                  bitField0_ |= 0x00000004;
13819                  fromURL_ = input.readBytes();
13820                  break;
13821                }
13822              }
13823            }
13824          } catch (com.google.protobuf.InvalidProtocolBufferException e) {
13825            throw e.setUnfinishedMessage(this);
13826          } catch (java.io.IOException e) {
13827            throw new com.google.protobuf.InvalidProtocolBufferException(
13828                e.getMessage()).setUnfinishedMessage(this);
13829          } finally {
13830            this.unknownFields = unknownFields.build();
13831            makeExtensionsImmutable();
13832          }
13833        }
13834        public static final com.google.protobuf.Descriptors.Descriptor
13835            getDescriptor() {
13836          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_GetEditLogManifestResponseProto_descriptor;
13837        }
13838    
13839        protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
13840            internalGetFieldAccessorTable() {
13841          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_GetEditLogManifestResponseProto_fieldAccessorTable
13842              .ensureFieldAccessorsInitialized(
13843                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto.Builder.class);
13844        }
13845    
13846        public static com.google.protobuf.Parser<GetEditLogManifestResponseProto> PARSER =
13847            new com.google.protobuf.AbstractParser<GetEditLogManifestResponseProto>() {
13848          public GetEditLogManifestResponseProto parsePartialFrom(
13849              com.google.protobuf.CodedInputStream input,
13850              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
13851              throws com.google.protobuf.InvalidProtocolBufferException {
13852            return new GetEditLogManifestResponseProto(input, extensionRegistry);
13853          }
13854        };
13855    
13856        @java.lang.Override
13857        public com.google.protobuf.Parser<GetEditLogManifestResponseProto> getParserForType() {
13858          return PARSER;
13859        }
13860    
13861        private int bitField0_;
13862        // required .hadoop.hdfs.RemoteEditLogManifestProto manifest = 1;
13863        public static final int MANIFEST_FIELD_NUMBER = 1;
13864        private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto manifest_;
13865        /**
13866         * <code>required .hadoop.hdfs.RemoteEditLogManifestProto manifest = 1;</code>
13867         */
13868        public boolean hasManifest() {
13869          return ((bitField0_ & 0x00000001) == 0x00000001);
13870        }
13871        /**
13872         * <code>required .hadoop.hdfs.RemoteEditLogManifestProto manifest = 1;</code>
13873         */
13874        public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto getManifest() {
13875          return manifest_;
13876        }
13877        /**
13878         * <code>required .hadoop.hdfs.RemoteEditLogManifestProto manifest = 1;</code>
13879         */
13880        public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProtoOrBuilder getManifestOrBuilder() {
13881          return manifest_;
13882        }
13883    
13884        // required uint32 httpPort = 2;
13885        public static final int HTTPPORT_FIELD_NUMBER = 2;
13886        private int httpPort_;
13887        /**
13888         * <code>required uint32 httpPort = 2;</code>
13889         *
13890         * <pre>
13891         * Deprecated by fromURL
13892         * </pre>
13893         */
13894        public boolean hasHttpPort() {
13895          return ((bitField0_ & 0x00000002) == 0x00000002);
13896        }
13897        /**
13898         * <code>required uint32 httpPort = 2;</code>
13899         *
13900         * <pre>
13901         * Deprecated by fromURL
13902         * </pre>
13903         */
13904        public int getHttpPort() {
13905          return httpPort_;
13906        }
13907    
13908        // optional string fromURL = 3;
13909        public static final int FROMURL_FIELD_NUMBER = 3;
13910        private java.lang.Object fromURL_;
13911        /**
13912         * <code>optional string fromURL = 3;</code>
13913         */
13914        public boolean hasFromURL() {
13915          return ((bitField0_ & 0x00000004) == 0x00000004);
13916        }
13917        /**
13918         * <code>optional string fromURL = 3;</code>
13919         */
13920        public java.lang.String getFromURL() {
13921          java.lang.Object ref = fromURL_;
13922          if (ref instanceof java.lang.String) {
13923            return (java.lang.String) ref;
13924          } else {
13925            com.google.protobuf.ByteString bs = 
13926                (com.google.protobuf.ByteString) ref;
13927            java.lang.String s = bs.toStringUtf8();
13928            if (bs.isValidUtf8()) {
13929              fromURL_ = s;
13930            }
13931            return s;
13932          }
13933        }
13934        /**
13935         * <code>optional string fromURL = 3;</code>
13936         */
13937        public com.google.protobuf.ByteString
13938            getFromURLBytes() {
13939          java.lang.Object ref = fromURL_;
13940          if (ref instanceof java.lang.String) {
13941            com.google.protobuf.ByteString b = 
13942                com.google.protobuf.ByteString.copyFromUtf8(
13943                    (java.lang.String) ref);
13944            fromURL_ = b;
13945            return b;
13946          } else {
13947            return (com.google.protobuf.ByteString) ref;
13948          }
13949        }
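    // fromURL_ holds either a String or a ByteString. getFromURL() caches the
    // decoded String only when the bytes are valid UTF-8; getFromURLBytes()
    // caches the UTF-8 encoding when starting from a String.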
13950    
13951        private void initFields() {
13952          manifest_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto.getDefaultInstance();
13953          httpPort_ = 0;
13954          fromURL_ = "";
13955        }
13956        private byte memoizedIsInitialized = -1;
13957        public final boolean isInitialized() {
13958          byte isInitialized = memoizedIsInitialized;
13959          if (isInitialized != -1) return isInitialized == 1;
13960    
13961          if (!hasManifest()) {
13962            memoizedIsInitialized = 0;
13963            return false;
13964          }
13965          if (!hasHttpPort()) {
13966            memoizedIsInitialized = 0;
13967            return false;
13968          }
13969          if (!getManifest().isInitialized()) {
13970            memoizedIsInitialized = 0;
13971            return false;
13972          }
13973          memoizedIsInitialized = 1;
13974          return true;
13975        }
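    // memoizedIsInitialized is a three-state byte: -1 = not yet computed,
    // 0 = a required field (manifest or httpPort) is missing or the nested
    // manifest is uninitialized, 1 = fully initialized.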
13976    
13977        public void writeTo(com.google.protobuf.CodedOutputStream output)
13978                            throws java.io.IOException {
13979          getSerializedSize();
13980          if (((bitField0_ & 0x00000001) == 0x00000001)) {
13981            output.writeMessage(1, manifest_);
13982          }
13983          if (((bitField0_ & 0x00000002) == 0x00000002)) {
13984            output.writeUInt32(2, httpPort_);
13985          }
13986          if (((bitField0_ & 0x00000004) == 0x00000004)) {
13987            output.writeBytes(3, getFromURLBytes());
13988          }
13989          getUnknownFields().writeTo(output);
13990        }
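    // writeTo() invokes getSerializedSize() first so that the nested
    // manifest's size is memoized before writeMessage() emits its length
    // prefix.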
13991    
13992        private int memoizedSerializedSize = -1;
13993        public int getSerializedSize() {
13994          int size = memoizedSerializedSize;
13995          if (size != -1) return size;
13996    
13997          size = 0;
13998          if (((bitField0_ & 0x00000001) == 0x00000001)) {
13999            size += com.google.protobuf.CodedOutputStream
14000              .computeMessageSize(1, manifest_);
14001          }
14002          if (((bitField0_ & 0x00000002) == 0x00000002)) {
14003            size += com.google.protobuf.CodedOutputStream
14004              .computeUInt32Size(2, httpPort_);
14005          }
14006          if (((bitField0_ & 0x00000004) == 0x00000004)) {
14007            size += com.google.protobuf.CodedOutputStream
14008              .computeBytesSize(3, getFromURLBytes());
14009          }
14010          size += getUnknownFields().getSerializedSize();
14011          memoizedSerializedSize = size;
14012          return size;
14013        }
14014    
14015        private static final long serialVersionUID = 0L;
14016        @java.lang.Override
14017        protected java.lang.Object writeReplace()
14018            throws java.io.ObjectStreamException {
14019          return super.writeReplace();
14020        }
14021    
14022        @java.lang.Override
14023        public boolean equals(final java.lang.Object obj) {
14024          if (obj == this) {
14025            return true;
14026          }
14027          if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto)) {
14028            return super.equals(obj);
14029          }
14030          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto) obj;
14031    
14032          boolean result = true;
14033          result = result && (hasManifest() == other.hasManifest());
14034          if (hasManifest()) {
14035            result = result && getManifest()
14036                .equals(other.getManifest());
14037          }
14038          result = result && (hasHttpPort() == other.hasHttpPort());
14039          if (hasHttpPort()) {
14040            result = result && (getHttpPort()
14041                == other.getHttpPort());
14042          }
14043          result = result && (hasFromURL() == other.hasFromURL());
14044          if (hasFromURL()) {
14045            result = result && getFromURL()
14046                .equals(other.getFromURL());
14047          }
14048          result = result &&
14049              getUnknownFields().equals(other.getUnknownFields());
14050          return result;
14051        }
14052    
14053        private int memoizedHashCode = 0;
14054        @java.lang.Override
14055        public int hashCode() {
14056          if (memoizedHashCode != 0) {
14057            return memoizedHashCode;
14058          }
14059          int hash = 41;
14060          hash = (19 * hash) + getDescriptorForType().hashCode();
14061          if (hasManifest()) {
14062            hash = (37 * hash) + MANIFEST_FIELD_NUMBER;
14063            hash = (53 * hash) + getManifest().hashCode();
14064          }
14065          if (hasHttpPort()) {
14066            hash = (37 * hash) + HTTPPORT_FIELD_NUMBER;
14067            hash = (53 * hash) + getHttpPort();
14068          }
14069          if (hasFromURL()) {
14070            hash = (37 * hash) + FROMURL_FIELD_NUMBER;
14071            hash = (53 * hash) + getFromURL().hashCode();
14072          }
14073          hash = (29 * hash) + getUnknownFields().hashCode();
14074          memoizedHashCode = hash;
14075          return hash;
14076        }
14077    
14078        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto parseFrom(
14079            com.google.protobuf.ByteString data)
14080            throws com.google.protobuf.InvalidProtocolBufferException {
14081          return PARSER.parseFrom(data);
14082        }
14083        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto parseFrom(
14084            com.google.protobuf.ByteString data,
14085            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
14086            throws com.google.protobuf.InvalidProtocolBufferException {
14087          return PARSER.parseFrom(data, extensionRegistry);
14088        }
14089        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto parseFrom(byte[] data)
14090            throws com.google.protobuf.InvalidProtocolBufferException {
14091          return PARSER.parseFrom(data);
14092        }
14093        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto parseFrom(
14094            byte[] data,
14095            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
14096            throws com.google.protobuf.InvalidProtocolBufferException {
14097          return PARSER.parseFrom(data, extensionRegistry);
14098        }
14099        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto parseFrom(java.io.InputStream input)
14100            throws java.io.IOException {
14101          return PARSER.parseFrom(input);
14102        }
14103        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto parseFrom(
14104            java.io.InputStream input,
14105            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
14106            throws java.io.IOException {
14107          return PARSER.parseFrom(input, extensionRegistry);
14108        }
14109        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto parseDelimitedFrom(java.io.InputStream input)
14110            throws java.io.IOException {
14111          return PARSER.parseDelimitedFrom(input);
14112        }
14113        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto parseDelimitedFrom(
14114            java.io.InputStream input,
14115            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
14116            throws java.io.IOException {
14117          return PARSER.parseDelimitedFrom(input, extensionRegistry);
14118        }
14119        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto parseFrom(
14120            com.google.protobuf.CodedInputStream input)
14121            throws java.io.IOException {
14122          return PARSER.parseFrom(input);
14123        }
14124        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto parseFrom(
14125            com.google.protobuf.CodedInputStream input,
14126            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
14127            throws java.io.IOException {
14128          return PARSER.parseFrom(input, extensionRegistry);
14129        }
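    // Usage sketch (illustrative): parse a response and prefer the newer
    // fromURL field over the deprecated httpPort when it is present. "data"
    // is a hypothetical serialized byte[]:
    //
    //   GetEditLogManifestResponseProto resp =
    //       GetEditLogManifestResponseProto.parseFrom(data);
    //   String source = resp.hasFromURL()
    //       ? resp.getFromURL()
    //       : ("port " + resp.getHttpPort());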
14130    
14131        public static Builder newBuilder() { return Builder.create(); }
14132        public Builder newBuilderForType() { return newBuilder(); }
14133        public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto prototype) {
14134          return newBuilder().mergeFrom(prototype);
14135        }
14136        public Builder toBuilder() { return newBuilder(this); }
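    // Builder usage sketch (illustrative; all values are hypothetical, and
    // setFromURL is assumed to be the standard generated setter for the
    // optional string field). manifest and httpPort are both required:
    //
    //   GetEditLogManifestResponseProto resp =
    //       GetEditLogManifestResponseProto.newBuilder()
    //           .setManifest(manifest)           // a RemoteEditLogManifestProto
    //           .setHttpPort(8480)               // deprecated by fromURL
    //           .setFromURL("http://jn1:8480/")  // optional
    //           .build();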
14137    
14138        @java.lang.Override
14139        protected Builder newBuilderForType(
14140            com.google.protobuf.GeneratedMessage.BuilderParent parent) {
14141          Builder builder = new Builder(parent);
14142          return builder;
14143        }
14144        /**
14145         * Protobuf type {@code hadoop.hdfs.GetEditLogManifestResponseProto}
14146         */
14147        public static final class Builder extends
14148            com.google.protobuf.GeneratedMessage.Builder<Builder>
14149           implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProtoOrBuilder {
14150          public static final com.google.protobuf.Descriptors.Descriptor
14151              getDescriptor() {
14152            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_GetEditLogManifestResponseProto_descriptor;
14153          }
14154    
14155          protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
14156              internalGetFieldAccessorTable() {
14157            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_GetEditLogManifestResponseProto_fieldAccessorTable
14158                .ensureFieldAccessorsInitialized(
14159                    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto.Builder.class);
14160          }
14161    
14162          // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto.newBuilder()
14163          private Builder() {
14164            maybeForceBuilderInitialization();
14165          }
14166    
14167          private Builder(
14168              com.google.protobuf.GeneratedMessage.BuilderParent parent) {
14169            super(parent);
14170            maybeForceBuilderInitialization();
14171          }
14172          private void maybeForceBuilderInitialization() {
14173            if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
14174              getManifestFieldBuilder();
14175            }
14176          }
14177          private static Builder create() {
14178            return new Builder();
14179          }
14180    
14181          public Builder clear() {
14182            super.clear();
14183            if (manifestBuilder_ == null) {
14184              manifest_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto.getDefaultInstance();
14185            } else {
14186              manifestBuilder_.clear();
14187            }
14188            bitField0_ = (bitField0_ & ~0x00000001);
14189            httpPort_ = 0;
14190            bitField0_ = (bitField0_ & ~0x00000002);
14191            fromURL_ = "";
14192            bitField0_ = (bitField0_ & ~0x00000004);
14193            return this;
14194          }
14195    
14196          public Builder clone() {
14197            return create().mergeFrom(buildPartial());
14198          }
14199    
14200          public com.google.protobuf.Descriptors.Descriptor
14201              getDescriptorForType() {
14202            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_GetEditLogManifestResponseProto_descriptor;
14203          }
14204    
14205          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto getDefaultInstanceForType() {
14206            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto.getDefaultInstance();
14207          }
14208    
14209          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto build() {
14210            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto result = buildPartial();
14211            if (!result.isInitialized()) {
14212              throw newUninitializedMessageException(result);
14213            }
14214            return result;
14215          }
14216    
14217          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto buildPartial() {
14218            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto(this);
14219            int from_bitField0_ = bitField0_;
14220            int to_bitField0_ = 0;
14221            if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
14222              to_bitField0_ |= 0x00000001;
14223            }
14224            if (manifestBuilder_ == null) {
14225              result.manifest_ = manifest_;
14226            } else {
14227              result.manifest_ = manifestBuilder_.build();
14228            }
14229            if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
14230              to_bitField0_ |= 0x00000002;
14231            }
14232            result.httpPort_ = httpPort_;
14233            if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
14234              to_bitField0_ |= 0x00000004;
14235            }
14236            result.fromURL_ = fromURL_;
14237            result.bitField0_ = to_bitField0_;
14238            onBuilt();
14239            return result;
14240          }
14241    
14242          public Builder mergeFrom(com.google.protobuf.Message other) {
14243            if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto) {
14244              return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto)other);
14245            } else {
14246              super.mergeFrom(other);
14247              return this;
14248            }
14249          }
14250    
14251          public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto other) {
14252            if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto.getDefaultInstance()) return this;
14253            if (other.hasManifest()) {
14254              mergeManifest(other.getManifest());
14255            }
14256            if (other.hasHttpPort()) {
14257              setHttpPort(other.getHttpPort());
14258            }
14259            if (other.hasFromURL()) {
14260              bitField0_ |= 0x00000004;
14261              fromURL_ = other.fromURL_;
14262              onChanged();
14263            }
14264            this.mergeUnknownFields(other.getUnknownFields());
14265            return this;
14266          }
14267    
14268          public final boolean isInitialized() {
14269            if (!hasManifest()) {
14270              
14271              return false;
14272            }
14273            if (!hasHttpPort()) {
14274              
14275              return false;
14276            }
14277            if (!getManifest().isInitialized()) {
14278              
14279              return false;
14280            }
14281            return true;
14282          }
14283    
14284          public Builder mergeFrom(
14285              com.google.protobuf.CodedInputStream input,
14286              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
14287              throws java.io.IOException {
14288            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto parsedMessage = null;
14289            try {
14290              parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
14291            } catch (com.google.protobuf.InvalidProtocolBufferException e) {
14292              parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto) e.getUnfinishedMessage();
14293              throw e;
14294            } finally {
14295              if (parsedMessage != null) {
14296                mergeFrom(parsedMessage);
14297              }
14298            }
14299            return this;
14300          }
14301          private int bitField0_;
14302    
14303          // required .hadoop.hdfs.RemoteEditLogManifestProto manifest = 1;
14304          private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto manifest_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto.getDefaultInstance();
14305          private com.google.protobuf.SingleFieldBuilder<
14306              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProtoOrBuilder> manifestBuilder_;
14307          /**
14308           * <code>required .hadoop.hdfs.RemoteEditLogManifestProto manifest = 1;</code>
14309           */
14310          public boolean hasManifest() {
14311            return ((bitField0_ & 0x00000001) == 0x00000001);
14312          }
14313          /**
14314           * <code>required .hadoop.hdfs.RemoteEditLogManifestProto manifest = 1;</code>
14315           */
14316          public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto getManifest() {
14317            if (manifestBuilder_ == null) {
14318              return manifest_;
14319            } else {
14320              return manifestBuilder_.getMessage();
14321            }
14322          }
14323          /**
14324           * <code>required .hadoop.hdfs.RemoteEditLogManifestProto manifest = 1;</code>
14325           */
14326          public Builder setManifest(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto value) {
14327            if (manifestBuilder_ == null) {
14328              if (value == null) {
14329                throw new NullPointerException();
14330              }
14331              manifest_ = value;
14332              onChanged();
14333            } else {
14334              manifestBuilder_.setMessage(value);
14335            }
14336            bitField0_ |= 0x00000001;
14337            return this;
14338          }
14339          /**
14340           * <code>required .hadoop.hdfs.RemoteEditLogManifestProto manifest = 1;</code>
14341           */
14342          public Builder setManifest(
14343              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto.Builder builderForValue) {
14344            if (manifestBuilder_ == null) {
14345              manifest_ = builderForValue.build();
14346              onChanged();
14347            } else {
14348              manifestBuilder_.setMessage(builderForValue.build());
14349            }
14350            bitField0_ |= 0x00000001;
14351            return this;
14352          }
14353          /**
14354           * <code>required .hadoop.hdfs.RemoteEditLogManifestProto manifest = 1;</code>
14355           */
14356          public Builder mergeManifest(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto value) {
14357            if (manifestBuilder_ == null) {
14358              if (((bitField0_ & 0x00000001) == 0x00000001) &&
14359                  manifest_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto.getDefaultInstance()) {
14360                manifest_ =
14361                  org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto.newBuilder(manifest_).mergeFrom(value).buildPartial();
14362              } else {
14363                manifest_ = value;
14364              }
14365              onChanged();
14366            } else {
14367              manifestBuilder_.mergeFrom(value);
14368            }
14369            bitField0_ |= 0x00000001;
14370            return this;
14371          }
14372          /**
14373           * <code>required .hadoop.hdfs.RemoteEditLogManifestProto manifest = 1;</code>
14374           */
14375          public Builder clearManifest() {
14376            if (manifestBuilder_ == null) {
14377              manifest_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto.getDefaultInstance();
14378              onChanged();
14379            } else {
14380              manifestBuilder_.clear();
14381            }
14382            bitField0_ = (bitField0_ & ~0x00000001);
14383            return this;
14384          }
14385          /**
14386           * <code>required .hadoop.hdfs.RemoteEditLogManifestProto manifest = 1;</code>
14387           */
14388          public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto.Builder getManifestBuilder() {
14389            bitField0_ |= 0x00000001;
14390            onChanged();
14391            return getManifestFieldBuilder().getBuilder();
14392          }
14393          /**
14394           * <code>required .hadoop.hdfs.RemoteEditLogManifestProto manifest = 1;</code>
14395           */
14396          public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProtoOrBuilder getManifestOrBuilder() {
14397            if (manifestBuilder_ != null) {
14398              return manifestBuilder_.getMessageOrBuilder();
14399            } else {
14400              return manifest_;
14401            }
14402          }
14403          /**
14404           * <code>required .hadoop.hdfs.RemoteEditLogManifestProto manifest = 1;</code>
14405           */
14406          private com.google.protobuf.SingleFieldBuilder<
14407              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProtoOrBuilder> 
14408              getManifestFieldBuilder() {
14409            if (manifestBuilder_ == null) {
14410              manifestBuilder_ = new com.google.protobuf.SingleFieldBuilder<
14411                  org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProtoOrBuilder>(
14412                      manifest_,
14413                      getParentForChildren(),
14414                      isClean());
14415              manifest_ = null;
14416            }
14417            return manifestBuilder_;
14418          }
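      // Note: after getManifestFieldBuilder() runs, manifest_ is nulled out and
      // the lazily created SingleFieldBuilder takes over storage; the accessors
      // above branch on (manifestBuilder_ == null) to pick whichever
      // representation is currently active.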
14419    
14420          // required uint32 httpPort = 2;
14421          private int httpPort_ ;
14422          /**
14423           * <code>required uint32 httpPort = 2;</code>
14424           *
14425           * <pre>
14426           * Deprecated by fromURL
14427           * </pre>
14428           */
14429          public boolean hasHttpPort() {
14430            return ((bitField0_ & 0x00000002) == 0x00000002);
14431          }
14432          /**
14433           * <code>required uint32 httpPort = 2;</code>
14434           *
14435           * <pre>
14436           * Deprecated by fromURL
14437           * </pre>
14438           */
14439          public int getHttpPort() {
14440            return httpPort_;
14441          }
14442          /**
14443           * <code>required uint32 httpPort = 2;</code>
14444           *
14445           * <pre>
14446           * Deprecated by fromURL
14447           * </pre>
14448           */
14449          public Builder setHttpPort(int value) {
14450            bitField0_ |= 0x00000002;
14451            httpPort_ = value;
14452            onChanged();
14453            return this;
14454          }
14455          /**
14456           * <code>required uint32 httpPort = 2;</code>
14457           *
14458           * <pre>
14459           * Deprecated by fromURL
14460           * </pre>
14461           */
14462          public Builder clearHttpPort() {
14463            bitField0_ = (bitField0_ & ~0x00000002);
14464            httpPort_ = 0;
14465            onChanged();
14466            return this;
14467          }
14468    
14469          // optional string fromURL = 3;
14470          private java.lang.Object fromURL_ = "";
14471          /**
14472           * <code>optional string fromURL = 3;</code>
14473           */
14474          public boolean hasFromURL() {
14475            return ((bitField0_ & 0x00000004) == 0x00000004);
14476          }
14477          /**
14478           * <code>optional string fromURL = 3;</code>
14479           */
14480          public java.lang.String getFromURL() {
14481            java.lang.Object ref = fromURL_;
14482            if (!(ref instanceof java.lang.String)) {
14483              java.lang.String s = ((com.google.protobuf.ByteString) ref)
14484                  .toStringUtf8();
14485              fromURL_ = s;
14486              return s;
14487            } else {
14488              return (java.lang.String) ref;
14489            }
14490          }
14491          /**
14492           * <code>optional string fromURL = 3;</code>
14493           */
14494          public com.google.protobuf.ByteString
14495              getFromURLBytes() {
14496            java.lang.Object ref = fromURL_;
14497            if (ref instanceof String) {
14498              com.google.protobuf.ByteString b = 
14499                  com.google.protobuf.ByteString.copyFromUtf8(
14500                      (java.lang.String) ref);
14501              fromURL_ = b;
14502              return b;
14503            } else {
14504              return (com.google.protobuf.ByteString) ref;
14505            }
14506          }
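      // Note: fromURL_ stores either a java.lang.String or a ByteString.
      // getFromURL() and getFromURLBytes() convert on first access and cache
      // the converted value back into the field, so repeated reads skip the
      // UTF-8 encode/decode.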
14507          /**
14508           * <code>optional string fromURL = 3;</code>
14509           */
      public Builder setFromURL(
          java.lang.String value) {
        if (value == null) {
          throw new NullPointerException();
        }
        bitField0_ |= 0x00000004;
        fromURL_ = value;
        onChanged();
        return this;
      }
14520          /**
14521           * <code>optional string fromURL = 3;</code>
14522           */
14523          public Builder clearFromURL() {
14524            bitField0_ = (bitField0_ & ~0x00000004);
14525            fromURL_ = getDefaultInstance().getFromURL();
14526            onChanged();
14527            return this;
14528          }
14529          /**
14530           * <code>optional string fromURL = 3;</code>
14531           */
      public Builder setFromURLBytes(
          com.google.protobuf.ByteString value) {
        if (value == null) {
          throw new NullPointerException();
        }
        bitField0_ |= 0x00000004;
        fromURL_ = value;
        onChanged();
        return this;
      }
14542    
14543          // @@protoc_insertion_point(builder_scope:hadoop.hdfs.GetEditLogManifestResponseProto)
14544        }
14545    
14546        static {
14547          defaultInstance = new GetEditLogManifestResponseProto(true);
14548          defaultInstance.initFields();
14549        }
14550    
14551        // @@protoc_insertion_point(class_scope:hadoop.hdfs.GetEditLogManifestResponseProto)
14552      }
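  /*
   * Usage sketch (not part of the generated API surface): how a caller might
   * build and round-trip a GetEditLogManifestResponseProto with the protobuf
   * 2.5 builder API. The "manifest" variable is assumed to be a prebuilt
   * HdfsProtos.RemoteEditLogManifestProto.
   *
   *   GetEditLogManifestResponseProto resp =
   *       GetEditLogManifestResponseProto.newBuilder()
   *           .setManifest(manifest)          // required field 1
   *           .setHttpPort(8480)              // required field 2, deprecated by fromURL
   *           .setFromURL("http://jn1:8480")  // optional field 3
   *           .build();                       // throws if a required field is unset
   *   byte[] wire = resp.toByteArray();
   *   GetEditLogManifestResponseProto parsed =
   *       GetEditLogManifestResponseProto.parseFrom(wire);
   */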
14553    
14554      public interface PrepareRecoveryRequestProtoOrBuilder
14555          extends com.google.protobuf.MessageOrBuilder {
14556    
14557        // required .hadoop.hdfs.RequestInfoProto reqInfo = 1;
14558        /**
14559         * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
14560         */
14561        boolean hasReqInfo();
14562        /**
14563         * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
14564         */
14565        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo();
14566        /**
14567         * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
14568         */
14569        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder();
14570    
14571        // required uint64 segmentTxId = 2;
14572        /**
14573         * <code>required uint64 segmentTxId = 2;</code>
14574         */
14575        boolean hasSegmentTxId();
14576        /**
14577         * <code>required uint64 segmentTxId = 2;</code>
14578         */
14579        long getSegmentTxId();
14580      }
14581      /**
14582       * Protobuf type {@code hadoop.hdfs.PrepareRecoveryRequestProto}
14583       *
14584       * <pre>
14585       **
14586       * prepareRecovery()
14587       * </pre>
14588       */
14589      public static final class PrepareRecoveryRequestProto extends
14590          com.google.protobuf.GeneratedMessage
14591          implements PrepareRecoveryRequestProtoOrBuilder {
14592        // Use PrepareRecoveryRequestProto.newBuilder() to construct.
14593        private PrepareRecoveryRequestProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
14594          super(builder);
14595          this.unknownFields = builder.getUnknownFields();
14596        }
14597        private PrepareRecoveryRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
14598    
14599        private static final PrepareRecoveryRequestProto defaultInstance;
14600        public static PrepareRecoveryRequestProto getDefaultInstance() {
14601          return defaultInstance;
14602        }
14603    
14604        public PrepareRecoveryRequestProto getDefaultInstanceForType() {
14605          return defaultInstance;
14606        }
14607    
14608        private final com.google.protobuf.UnknownFieldSet unknownFields;
14609        @java.lang.Override
14610        public final com.google.protobuf.UnknownFieldSet
14611            getUnknownFields() {
14612          return this.unknownFields;
14613        }
14614        private PrepareRecoveryRequestProto(
14615            com.google.protobuf.CodedInputStream input,
14616            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
14617            throws com.google.protobuf.InvalidProtocolBufferException {
14618          initFields();
14619          int mutable_bitField0_ = 0;
14620          com.google.protobuf.UnknownFieldSet.Builder unknownFields =
14621              com.google.protobuf.UnknownFieldSet.newBuilder();
14622          try {
14623            boolean done = false;
14624            while (!done) {
14625              int tag = input.readTag();
14626              switch (tag) {
14627                case 0:
14628                  done = true;
14629                  break;
14630                default: {
14631                  if (!parseUnknownField(input, unknownFields,
14632                                         extensionRegistry, tag)) {
14633                    done = true;
14634                  }
14635                  break;
14636                }
14637                case 10: {
14638                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder subBuilder = null;
14639                  if (((bitField0_ & 0x00000001) == 0x00000001)) {
14640                    subBuilder = reqInfo_.toBuilder();
14641                  }
14642                  reqInfo_ = input.readMessage(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.PARSER, extensionRegistry);
14643                  if (subBuilder != null) {
14644                    subBuilder.mergeFrom(reqInfo_);
14645                    reqInfo_ = subBuilder.buildPartial();
14646                  }
14647                  bitField0_ |= 0x00000001;
14648                  break;
14649                }
14650                case 16: {
14651                  bitField0_ |= 0x00000002;
14652                  segmentTxId_ = input.readUInt64();
14653                  break;
14654                }
14655              }
14656            }
14657          } catch (com.google.protobuf.InvalidProtocolBufferException e) {
14658            throw e.setUnfinishedMessage(this);
14659          } catch (java.io.IOException e) {
14660            throw new com.google.protobuf.InvalidProtocolBufferException(
14661                e.getMessage()).setUnfinishedMessage(this);
14662          } finally {
14663            this.unknownFields = unknownFields.build();
14664            makeExtensionsImmutable();
14665          }
14666        }
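    // Note: each case label above is a raw wire tag, (field_number << 3) | wire_type:
    // 10 = field 1, length-delimited (reqInfo); 16 = field 2, varint (segmentTxId);
    // tag 0 means end of stream.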
14667        public static final com.google.protobuf.Descriptors.Descriptor
14668            getDescriptor() {
14669          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_PrepareRecoveryRequestProto_descriptor;
14670        }
14671    
14672        protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
14673            internalGetFieldAccessorTable() {
14674          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_PrepareRecoveryRequestProto_fieldAccessorTable
14675              .ensureFieldAccessorsInitialized(
14676                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto.Builder.class);
14677        }
14678    
14679        public static com.google.protobuf.Parser<PrepareRecoveryRequestProto> PARSER =
14680            new com.google.protobuf.AbstractParser<PrepareRecoveryRequestProto>() {
14681          public PrepareRecoveryRequestProto parsePartialFrom(
14682              com.google.protobuf.CodedInputStream input,
14683              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
14684              throws com.google.protobuf.InvalidProtocolBufferException {
14685            return new PrepareRecoveryRequestProto(input, extensionRegistry);
14686          }
14687        };
14688    
14689        @java.lang.Override
14690        public com.google.protobuf.Parser<PrepareRecoveryRequestProto> getParserForType() {
14691          return PARSER;
14692        }
14693    
14694        private int bitField0_;
14695        // required .hadoop.hdfs.RequestInfoProto reqInfo = 1;
14696        public static final int REQINFO_FIELD_NUMBER = 1;
14697        private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto reqInfo_;
14698        /**
14699         * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
14700         */
14701        public boolean hasReqInfo() {
14702          return ((bitField0_ & 0x00000001) == 0x00000001);
14703        }
14704        /**
14705         * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
14706         */
14707        public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo() {
14708          return reqInfo_;
14709        }
14710        /**
14711         * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
14712         */
14713        public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder() {
14714          return reqInfo_;
14715        }
14716    
14717        // required uint64 segmentTxId = 2;
14718        public static final int SEGMENTTXID_FIELD_NUMBER = 2;
14719        private long segmentTxId_;
14720        /**
14721         * <code>required uint64 segmentTxId = 2;</code>
14722         */
14723        public boolean hasSegmentTxId() {
14724          return ((bitField0_ & 0x00000002) == 0x00000002);
14725        }
14726        /**
14727         * <code>required uint64 segmentTxId = 2;</code>
14728         */
14729        public long getSegmentTxId() {
14730          return segmentTxId_;
14731        }
14732    
14733        private void initFields() {
14734          reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
14735          segmentTxId_ = 0L;
14736        }
14737        private byte memoizedIsInitialized = -1;
14738        public final boolean isInitialized() {
14739          byte isInitialized = memoizedIsInitialized;
14740          if (isInitialized != -1) return isInitialized == 1;
14741    
14742          if (!hasReqInfo()) {
14743            memoizedIsInitialized = 0;
14744            return false;
14745          }
14746          if (!hasSegmentTxId()) {
14747            memoizedIsInitialized = 0;
14748            return false;
14749          }
14750          if (!getReqInfo().isInitialized()) {
14751            memoizedIsInitialized = 0;
14752            return false;
14753          }
14754          memoizedIsInitialized = 1;
14755          return true;
14756        }
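    // Note: the result is memoized in memoizedIsInitialized (-1 = not yet
    // computed, 0 = false, 1 = true), so the required-field walk runs at most
    // once per immutable instance.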
14757    
14758        public void writeTo(com.google.protobuf.CodedOutputStream output)
14759                            throws java.io.IOException {
14760          getSerializedSize();
14761          if (((bitField0_ & 0x00000001) == 0x00000001)) {
14762            output.writeMessage(1, reqInfo_);
14763          }
14764          if (((bitField0_ & 0x00000002) == 0x00000002)) {
14765            output.writeUInt64(2, segmentTxId_);
14766          }
14767          getUnknownFields().writeTo(output);
14768        }
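    // Note: writeTo() invokes getSerializedSize() first so the memoized sizes
    // of this message and any nested messages are populated before writing,
    // since length-delimited fields must emit their byte length up front.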
14769    
14770        private int memoizedSerializedSize = -1;
14771        public int getSerializedSize() {
14772          int size = memoizedSerializedSize;
14773          if (size != -1) return size;
14774    
14775          size = 0;
14776          if (((bitField0_ & 0x00000001) == 0x00000001)) {
14777            size += com.google.protobuf.CodedOutputStream
14778              .computeMessageSize(1, reqInfo_);
14779          }
14780          if (((bitField0_ & 0x00000002) == 0x00000002)) {
14781            size += com.google.protobuf.CodedOutputStream
14782              .computeUInt64Size(2, segmentTxId_);
14783          }
14784          size += getUnknownFields().getSerializedSize();
14785          memoizedSerializedSize = size;
14786          return size;
14787        }
14788    
14789        private static final long serialVersionUID = 0L;
14790        @java.lang.Override
14791        protected java.lang.Object writeReplace()
14792            throws java.io.ObjectStreamException {
14793          return super.writeReplace();
14794        }
14795    
14796        @java.lang.Override
14797        public boolean equals(final java.lang.Object obj) {
14798          if (obj == this) {
        return true;
14800          }
14801          if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto)) {
14802            return super.equals(obj);
14803          }
14804          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto) obj;
14805    
14806          boolean result = true;
14807          result = result && (hasReqInfo() == other.hasReqInfo());
14808          if (hasReqInfo()) {
14809            result = result && getReqInfo()
14810                .equals(other.getReqInfo());
14811          }
14812          result = result && (hasSegmentTxId() == other.hasSegmentTxId());
14813          if (hasSegmentTxId()) {
14814            result = result && (getSegmentTxId()
14815                == other.getSegmentTxId());
14816          }
14817          result = result &&
14818              getUnknownFields().equals(other.getUnknownFields());
14819          return result;
14820        }
14821    
14822        private int memoizedHashCode = 0;
14823        @java.lang.Override
14824        public int hashCode() {
14825          if (memoizedHashCode != 0) {
14826            return memoizedHashCode;
14827          }
14828          int hash = 41;
14829          hash = (19 * hash) + getDescriptorForType().hashCode();
14830          if (hasReqInfo()) {
14831            hash = (37 * hash) + REQINFO_FIELD_NUMBER;
14832            hash = (53 * hash) + getReqInfo().hashCode();
14833          }
14834          if (hasSegmentTxId()) {
14835            hash = (37 * hash) + SEGMENTTXID_FIELD_NUMBER;
14836            hash = (53 * hash) + hashLong(getSegmentTxId());
14837          }
14838          hash = (29 * hash) + getUnknownFields().hashCode();
14839          memoizedHashCode = hash;
14840          return hash;
14841        }
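    // Note: 0 doubles as the "not yet computed" sentinel for memoizedHashCode;
    // if the mix ever produces 0, the hash is simply recomputed on each call,
    // which is cheap and still correct.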
14842    
14843        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto parseFrom(
14844            com.google.protobuf.ByteString data)
14845            throws com.google.protobuf.InvalidProtocolBufferException {
14846          return PARSER.parseFrom(data);
14847        }
14848        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto parseFrom(
14849            com.google.protobuf.ByteString data,
14850            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
14851            throws com.google.protobuf.InvalidProtocolBufferException {
14852          return PARSER.parseFrom(data, extensionRegistry);
14853        }
14854        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto parseFrom(byte[] data)
14855            throws com.google.protobuf.InvalidProtocolBufferException {
14856          return PARSER.parseFrom(data);
14857        }
14858        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto parseFrom(
14859            byte[] data,
14860            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
14861            throws com.google.protobuf.InvalidProtocolBufferException {
14862          return PARSER.parseFrom(data, extensionRegistry);
14863        }
14864        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto parseFrom(java.io.InputStream input)
14865            throws java.io.IOException {
14866          return PARSER.parseFrom(input);
14867        }
14868        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto parseFrom(
14869            java.io.InputStream input,
14870            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
14871            throws java.io.IOException {
14872          return PARSER.parseFrom(input, extensionRegistry);
14873        }
14874        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto parseDelimitedFrom(java.io.InputStream input)
14875            throws java.io.IOException {
14876          return PARSER.parseDelimitedFrom(input);
14877        }
14878        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto parseDelimitedFrom(
14879            java.io.InputStream input,
14880            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
14881            throws java.io.IOException {
14882          return PARSER.parseDelimitedFrom(input, extensionRegistry);
14883        }
14884        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto parseFrom(
14885            com.google.protobuf.CodedInputStream input)
14886            throws java.io.IOException {
14887          return PARSER.parseFrom(input);
14888        }
14889        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto parseFrom(
14890            com.google.protobuf.CodedInputStream input,
14891            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
14892            throws java.io.IOException {
14893          return PARSER.parseFrom(input, extensionRegistry);
14894        }
14895    
14896        public static Builder newBuilder() { return Builder.create(); }
14897        public Builder newBuilderForType() { return newBuilder(); }
14898        public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto prototype) {
14899          return newBuilder().mergeFrom(prototype);
14900        }
14901        public Builder toBuilder() { return newBuilder(this); }
14902    
14903        @java.lang.Override
14904        protected Builder newBuilderForType(
14905            com.google.protobuf.GeneratedMessage.BuilderParent parent) {
14906          Builder builder = new Builder(parent);
14907          return builder;
14908        }
14909        /**
14910         * Protobuf type {@code hadoop.hdfs.PrepareRecoveryRequestProto}
14911         *
14912         * <pre>
14913         **
14914         * prepareRecovery()
14915         * </pre>
14916         */
14917        public static final class Builder extends
14918            com.google.protobuf.GeneratedMessage.Builder<Builder>
14919           implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProtoOrBuilder {
14920          public static final com.google.protobuf.Descriptors.Descriptor
14921              getDescriptor() {
14922            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_PrepareRecoveryRequestProto_descriptor;
14923          }
14924    
14925          protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
14926              internalGetFieldAccessorTable() {
14927            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_PrepareRecoveryRequestProto_fieldAccessorTable
14928                .ensureFieldAccessorsInitialized(
14929                    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto.Builder.class);
14930          }
14931    
14932          // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto.newBuilder()
14933          private Builder() {
14934            maybeForceBuilderInitialization();
14935          }
14936    
14937          private Builder(
14938              com.google.protobuf.GeneratedMessage.BuilderParent parent) {
14939            super(parent);
14940            maybeForceBuilderInitialization();
14941          }
14942          private void maybeForceBuilderInitialization() {
14943            if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
14944              getReqInfoFieldBuilder();
14945            }
14946          }
14947          private static Builder create() {
14948            return new Builder();
14949          }
14950    
14951          public Builder clear() {
14952            super.clear();
14953            if (reqInfoBuilder_ == null) {
14954              reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
14955            } else {
14956              reqInfoBuilder_.clear();
14957            }
14958            bitField0_ = (bitField0_ & ~0x00000001);
14959            segmentTxId_ = 0L;
14960            bitField0_ = (bitField0_ & ~0x00000002);
14961            return this;
14962          }
14963    
14964          public Builder clone() {
14965            return create().mergeFrom(buildPartial());
14966          }
14967    
14968          public com.google.protobuf.Descriptors.Descriptor
14969              getDescriptorForType() {
14970            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_PrepareRecoveryRequestProto_descriptor;
14971          }
14972    
14973          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto getDefaultInstanceForType() {
14974            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto.getDefaultInstance();
14975          }
14976    
14977          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto build() {
14978            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto result = buildPartial();
14979            if (!result.isInitialized()) {
14980              throw newUninitializedMessageException(result);
14981            }
14982            return result;
14983          }
14984    
14985          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto buildPartial() {
14986            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto(this);
14987            int from_bitField0_ = bitField0_;
14988            int to_bitField0_ = 0;
14989            if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
14990              to_bitField0_ |= 0x00000001;
14991            }
14992            if (reqInfoBuilder_ == null) {
14993              result.reqInfo_ = reqInfo_;
14994            } else {
14995              result.reqInfo_ = reqInfoBuilder_.build();
14996            }
14997            if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
14998              to_bitField0_ |= 0x00000002;
14999            }
15000            result.segmentTxId_ = segmentTxId_;
15001            result.bitField0_ = to_bitField0_;
15002            onBuilt();
15003            return result;
15004          }
15005    
15006          public Builder mergeFrom(com.google.protobuf.Message other) {
15007            if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto) {
15008              return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto)other);
15009            } else {
15010              super.mergeFrom(other);
15011              return this;
15012            }
15013          }
15014    
15015          public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto other) {
15016            if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto.getDefaultInstance()) return this;
15017            if (other.hasReqInfo()) {
15018              mergeReqInfo(other.getReqInfo());
15019            }
15020            if (other.hasSegmentTxId()) {
15021              setSegmentTxId(other.getSegmentTxId());
15022            }
15023            this.mergeUnknownFields(other.getUnknownFields());
15024            return this;
15025          }
15026    
      public final boolean isInitialized() {
        if (!hasReqInfo()) {
          return false;
        }
        if (!hasSegmentTxId()) {
          return false;
        }
        if (!getReqInfo().isInitialized()) {
          return false;
        }
        return true;
      }
15042    
15043          public Builder mergeFrom(
15044              com.google.protobuf.CodedInputStream input,
15045              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
15046              throws java.io.IOException {
15047            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto parsedMessage = null;
15048            try {
15049              parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
15050            } catch (com.google.protobuf.InvalidProtocolBufferException e) {
15051              parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto) e.getUnfinishedMessage();
15052              throw e;
15053            } finally {
15054              if (parsedMessage != null) {
15055                mergeFrom(parsedMessage);
15056              }
15057            }
15058            return this;
15059          }
15060          private int bitField0_;
15061    
15062          // required .hadoop.hdfs.RequestInfoProto reqInfo = 1;
15063          private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
15064          private com.google.protobuf.SingleFieldBuilder<
15065              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder> reqInfoBuilder_;
15066          /**
15067           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
15068           */
15069          public boolean hasReqInfo() {
15070            return ((bitField0_ & 0x00000001) == 0x00000001);
15071          }
15072          /**
15073           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
15074           */
15075          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo() {
15076            if (reqInfoBuilder_ == null) {
15077              return reqInfo_;
15078            } else {
15079              return reqInfoBuilder_.getMessage();
15080            }
15081          }
15082          /**
15083           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
15084           */
15085          public Builder setReqInfo(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto value) {
15086            if (reqInfoBuilder_ == null) {
15087              if (value == null) {
15088                throw new NullPointerException();
15089              }
15090              reqInfo_ = value;
15091              onChanged();
15092            } else {
15093              reqInfoBuilder_.setMessage(value);
15094            }
15095            bitField0_ |= 0x00000001;
15096            return this;
15097          }
15098          /**
15099           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
15100           */
15101          public Builder setReqInfo(
15102              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder builderForValue) {
15103            if (reqInfoBuilder_ == null) {
15104              reqInfo_ = builderForValue.build();
15105              onChanged();
15106            } else {
15107              reqInfoBuilder_.setMessage(builderForValue.build());
15108            }
15109            bitField0_ |= 0x00000001;
15110            return this;
15111          }
15112          /**
15113           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
15114           */
15115          public Builder mergeReqInfo(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto value) {
15116            if (reqInfoBuilder_ == null) {
15117              if (((bitField0_ & 0x00000001) == 0x00000001) &&
15118                  reqInfo_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance()) {
15119                reqInfo_ =
15120                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.newBuilder(reqInfo_).mergeFrom(value).buildPartial();
15121              } else {
15122                reqInfo_ = value;
15123              }
15124              onChanged();
15125            } else {
15126              reqInfoBuilder_.mergeFrom(value);
15127            }
15128            bitField0_ |= 0x00000001;
15129            return this;
15130          }
15131          /**
15132           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
15133           */
15134          public Builder clearReqInfo() {
15135            if (reqInfoBuilder_ == null) {
15136              reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
15137              onChanged();
15138            } else {
15139              reqInfoBuilder_.clear();
15140            }
15141            bitField0_ = (bitField0_ & ~0x00000001);
15142            return this;
15143          }
15144          /**
15145           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
15146           */
15147          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder getReqInfoBuilder() {
15148            bitField0_ |= 0x00000001;
15149            onChanged();
15150            return getReqInfoFieldBuilder().getBuilder();
15151          }
15152          /**
15153           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
15154           */
15155          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder() {
15156            if (reqInfoBuilder_ != null) {
15157              return reqInfoBuilder_.getMessageOrBuilder();
15158            } else {
15159              return reqInfo_;
15160            }
15161          }
15162          /**
15163           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
15164           */
15165          private com.google.protobuf.SingleFieldBuilder<
15166              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder> 
15167              getReqInfoFieldBuilder() {
15168            if (reqInfoBuilder_ == null) {
15169              reqInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder<
15170                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder>(
15171                      reqInfo_,
15172                      getParentForChildren(),
15173                      isClean());
15174              reqInfo_ = null;
15175            }
15176            return reqInfoBuilder_;
15177          }
15178    
15179          // required uint64 segmentTxId = 2;
15180          private long segmentTxId_ ;
15181          /**
15182           * <code>required uint64 segmentTxId = 2;</code>
15183           */
15184          public boolean hasSegmentTxId() {
15185            return ((bitField0_ & 0x00000002) == 0x00000002);
15186          }
15187          /**
15188           * <code>required uint64 segmentTxId = 2;</code>
15189           */
15190          public long getSegmentTxId() {
15191            return segmentTxId_;
15192          }
15193          /**
15194           * <code>required uint64 segmentTxId = 2;</code>
15195           */
15196          public Builder setSegmentTxId(long value) {
15197            bitField0_ |= 0x00000002;
15198            segmentTxId_ = value;
15199            onChanged();
15200            return this;
15201          }
15202          /**
15203           * <code>required uint64 segmentTxId = 2;</code>
15204           */
15205          public Builder clearSegmentTxId() {
15206            bitField0_ = (bitField0_ & ~0x00000002);
15207            segmentTxId_ = 0L;
15208            onChanged();
15209            return this;
15210          }
15211    
15212          // @@protoc_insertion_point(builder_scope:hadoop.hdfs.PrepareRecoveryRequestProto)
15213        }
15214    
15215        static {
15216          defaultInstance = new PrepareRecoveryRequestProto(true);
15217          defaultInstance.initFields();
15218        }
15219    
15220        // @@protoc_insertion_point(class_scope:hadoop.hdfs.PrepareRecoveryRequestProto)
15221      }
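  /*
   * Usage sketch (not part of the generated API surface): building a
   * PrepareRecoveryRequestProto and round-tripping it through the delimited
   * form. "reqInfo" is assumed to be a prebuilt RequestInfoProto.
   *
   *   PrepareRecoveryRequestProto req =
   *       PrepareRecoveryRequestProto.newBuilder()
   *           .setReqInfo(reqInfo)      // required field 1
   *           .setSegmentTxId(12345L)   // required field 2
   *           .build();
   *   java.io.ByteArrayOutputStream out = new java.io.ByteArrayOutputStream();
   *   req.writeDelimitedTo(out);        // length-prefixed, for streaming
   *   PrepareRecoveryRequestProto back =
   *       PrepareRecoveryRequestProto.parseDelimitedFrom(
   *           new java.io.ByteArrayInputStream(out.toByteArray()));
   */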
15222    
15223      public interface PrepareRecoveryResponseProtoOrBuilder
15224          extends com.google.protobuf.MessageOrBuilder {
15225    
15226        // optional .hadoop.hdfs.SegmentStateProto segmentState = 1;
15227        /**
15228         * <code>optional .hadoop.hdfs.SegmentStateProto segmentState = 1;</code>
15229         */
15230        boolean hasSegmentState();
15231        /**
15232         * <code>optional .hadoop.hdfs.SegmentStateProto segmentState = 1;</code>
15233         */
15234        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto getSegmentState();
15235        /**
15236         * <code>optional .hadoop.hdfs.SegmentStateProto segmentState = 1;</code>
15237         */
15238        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder getSegmentStateOrBuilder();
15239    
15240        // optional uint64 acceptedInEpoch = 2;
15241        /**
15242         * <code>optional uint64 acceptedInEpoch = 2;</code>
15243         */
15244        boolean hasAcceptedInEpoch();
15245        /**
15246         * <code>optional uint64 acceptedInEpoch = 2;</code>
15247         */
15248        long getAcceptedInEpoch();
15249    
15250        // required uint64 lastWriterEpoch = 3;
15251        /**
15252         * <code>required uint64 lastWriterEpoch = 3;</code>
15253         */
15254        boolean hasLastWriterEpoch();
15255        /**
15256         * <code>required uint64 lastWriterEpoch = 3;</code>
15257         */
15258        long getLastWriterEpoch();
15259    
15260        // optional uint64 lastCommittedTxId = 4;
15261        /**
15262         * <code>optional uint64 lastCommittedTxId = 4;</code>
15263         *
15264         * <pre>
15265         * The highest committed txid that this logger has ever seen.
15266         * This may be higher than the data it actually has, in the case
15267         * that it was lagging before the old writer crashed.
15268         * </pre>
15269         */
15270        boolean hasLastCommittedTxId();
15271        /**
15272         * <code>optional uint64 lastCommittedTxId = 4;</code>
15273         *
15274         * <pre>
15275         * The highest committed txid that this logger has ever seen.
15276         * This may be higher than the data it actually has, in the case
15277         * that it was lagging before the old writer crashed.
15278         * </pre>
15279         */
15280        long getLastCommittedTxId();
15281      }
15282      /**
15283       * Protobuf type {@code hadoop.hdfs.PrepareRecoveryResponseProto}
15284       */
15285      public static final class PrepareRecoveryResponseProto extends
15286          com.google.protobuf.GeneratedMessage
15287          implements PrepareRecoveryResponseProtoOrBuilder {
15288        // Use PrepareRecoveryResponseProto.newBuilder() to construct.
15289        private PrepareRecoveryResponseProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
15290          super(builder);
15291          this.unknownFields = builder.getUnknownFields();
15292        }
15293        private PrepareRecoveryResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
15294    
15295        private static final PrepareRecoveryResponseProto defaultInstance;
15296        public static PrepareRecoveryResponseProto getDefaultInstance() {
15297          return defaultInstance;
15298        }
15299    
15300        public PrepareRecoveryResponseProto getDefaultInstanceForType() {
15301          return defaultInstance;
15302        }
15303    
15304        private final com.google.protobuf.UnknownFieldSet unknownFields;
15305        @java.lang.Override
15306        public final com.google.protobuf.UnknownFieldSet
15307            getUnknownFields() {
15308          return this.unknownFields;
15309        }
15310        private PrepareRecoveryResponseProto(
15311            com.google.protobuf.CodedInputStream input,
15312            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
15313            throws com.google.protobuf.InvalidProtocolBufferException {
15314          initFields();
15315          int mutable_bitField0_ = 0;
15316          com.google.protobuf.UnknownFieldSet.Builder unknownFields =
15317              com.google.protobuf.UnknownFieldSet.newBuilder();
15318          try {
15319            boolean done = false;
15320            while (!done) {
15321              int tag = input.readTag();
15322              switch (tag) {
15323                case 0:
15324                  done = true;
15325                  break;
15326                default: {
15327                  if (!parseUnknownField(input, unknownFields,
15328                                         extensionRegistry, tag)) {
15329                    done = true;
15330                  }
15331                  break;
15332                }
15333                case 10: {
15334                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder subBuilder = null;
15335                  if (((bitField0_ & 0x00000001) == 0x00000001)) {
15336                    subBuilder = segmentState_.toBuilder();
15337                  }
15338                  segmentState_ = input.readMessage(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.PARSER, extensionRegistry);
15339                  if (subBuilder != null) {
15340                    subBuilder.mergeFrom(segmentState_);
15341                    segmentState_ = subBuilder.buildPartial();
15342                  }
15343                  bitField0_ |= 0x00000001;
15344                  break;
15345                }
15346                case 16: {
15347                  bitField0_ |= 0x00000002;
15348                  acceptedInEpoch_ = input.readUInt64();
15349                  break;
15350                }
15351                case 24: {
15352                  bitField0_ |= 0x00000004;
15353                  lastWriterEpoch_ = input.readUInt64();
15354                  break;
15355                }
15356                case 32: {
15357                  bitField0_ |= 0x00000008;
15358                  lastCommittedTxId_ = input.readUInt64();
15359                  break;
15360                }
15361              }
15362            }
15363          } catch (com.google.protobuf.InvalidProtocolBufferException e) {
15364            throw e.setUnfinishedMessage(this);
15365          } catch (java.io.IOException e) {
15366            throw new com.google.protobuf.InvalidProtocolBufferException(
15367                e.getMessage()).setUnfinishedMessage(this);
15368          } finally {
15369            this.unknownFields = unknownFields.build();
15370            makeExtensionsImmutable();
15371          }
15372        }
15373        public static final com.google.protobuf.Descriptors.Descriptor
15374            getDescriptor() {
15375          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_PrepareRecoveryResponseProto_descriptor;
15376        }
15377    
15378        protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
15379            internalGetFieldAccessorTable() {
15380          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_PrepareRecoveryResponseProto_fieldAccessorTable
15381              .ensureFieldAccessorsInitialized(
15382                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto.Builder.class);
15383        }
15384    
15385        public static com.google.protobuf.Parser<PrepareRecoveryResponseProto> PARSER =
15386            new com.google.protobuf.AbstractParser<PrepareRecoveryResponseProto>() {
15387          public PrepareRecoveryResponseProto parsePartialFrom(
15388              com.google.protobuf.CodedInputStream input,
15389              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
15390              throws com.google.protobuf.InvalidProtocolBufferException {
15391            return new PrepareRecoveryResponseProto(input, extensionRegistry);
15392          }
15393        };
15394    
15395        @java.lang.Override
15396        public com.google.protobuf.Parser<PrepareRecoveryResponseProto> getParserForType() {
15397          return PARSER;
15398        }
15399    
15400        private int bitField0_;
15401        // optional .hadoop.hdfs.SegmentStateProto segmentState = 1;
15402        public static final int SEGMENTSTATE_FIELD_NUMBER = 1;
15403        private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto segmentState_;
15404        /**
15405         * <code>optional .hadoop.hdfs.SegmentStateProto segmentState = 1;</code>
15406         */
15407        public boolean hasSegmentState() {
15408          return ((bitField0_ & 0x00000001) == 0x00000001);
15409        }
15410        /**
15411         * <code>optional .hadoop.hdfs.SegmentStateProto segmentState = 1;</code>
15412         */
15413        public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto getSegmentState() {
15414          return segmentState_;
15415        }
15416        /**
15417         * <code>optional .hadoop.hdfs.SegmentStateProto segmentState = 1;</code>
15418         */
15419        public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder getSegmentStateOrBuilder() {
15420          return segmentState_;
15421        }
15422    
15423        // optional uint64 acceptedInEpoch = 2;
15424        public static final int ACCEPTEDINEPOCH_FIELD_NUMBER = 2;
15425        private long acceptedInEpoch_;
15426        /**
15427         * <code>optional uint64 acceptedInEpoch = 2;</code>
15428         */
15429        public boolean hasAcceptedInEpoch() {
15430          return ((bitField0_ & 0x00000002) == 0x00000002);
15431        }
15432        /**
15433         * <code>optional uint64 acceptedInEpoch = 2;</code>
15434         */
15435        public long getAcceptedInEpoch() {
15436          return acceptedInEpoch_;
15437        }
15438    
15439        // required uint64 lastWriterEpoch = 3;
15440        public static final int LASTWRITEREPOCH_FIELD_NUMBER = 3;
15441        private long lastWriterEpoch_;
15442        /**
15443         * <code>required uint64 lastWriterEpoch = 3;</code>
15444         */
15445        public boolean hasLastWriterEpoch() {
15446          return ((bitField0_ & 0x00000004) == 0x00000004);
15447        }
15448        /**
15449         * <code>required uint64 lastWriterEpoch = 3;</code>
15450         */
15451        public long getLastWriterEpoch() {
15452          return lastWriterEpoch_;
15453        }
15454    
15455        // optional uint64 lastCommittedTxId = 4;
15456        public static final int LASTCOMMITTEDTXID_FIELD_NUMBER = 4;
15457        private long lastCommittedTxId_;
15458        /**
15459         * <code>optional uint64 lastCommittedTxId = 4;</code>
15460         *
15461         * <pre>
15462         * The highest committed txid that this logger has ever seen.
15463         * This may be higher than the data it actually has, in the case
15464         * that it was lagging before the old writer crashed.
15465         * </pre>
15466         */
15467        public boolean hasLastCommittedTxId() {
15468          return ((bitField0_ & 0x00000008) == 0x00000008);
15469        }
15470        /**
15471         * <code>optional uint64 lastCommittedTxId = 4;</code>
15472         *
15473         * <pre>
15474         * The highest committed txid that this logger has ever seen.
15475         * This may be higher than the data it actually has, in the case
15476         * that it was lagging before the old writer crashed.
15477         * </pre>
15478         */
15479        public long getLastCommittedTxId() {
15480          return lastCommittedTxId_;
15481        }
15482    
15483        private void initFields() {
15484          segmentState_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance();
15485          acceptedInEpoch_ = 0L;
15486          lastWriterEpoch_ = 0L;
15487          lastCommittedTxId_ = 0L;
15488        }
15489        private byte memoizedIsInitialized = -1;
15490        public final boolean isInitialized() {
15491          byte isInitialized = memoizedIsInitialized;
15492          if (isInitialized != -1) return isInitialized == 1;
15493    
15494          if (!hasLastWriterEpoch()) {
15495            memoizedIsInitialized = 0;
15496            return false;
15497          }
15498          if (hasSegmentState()) {
15499            if (!getSegmentState().isInitialized()) {
15500              memoizedIsInitialized = 0;
15501              return false;
15502            }
15503          }
15504          memoizedIsInitialized = 1;
15505          return true;
15506        }
15507    
15508        public void writeTo(com.google.protobuf.CodedOutputStream output)
15509                            throws java.io.IOException {
15510          getSerializedSize();
15511          if (((bitField0_ & 0x00000001) == 0x00000001)) {
15512            output.writeMessage(1, segmentState_);
15513          }
15514          if (((bitField0_ & 0x00000002) == 0x00000002)) {
15515            output.writeUInt64(2, acceptedInEpoch_);
15516          }
15517          if (((bitField0_ & 0x00000004) == 0x00000004)) {
15518            output.writeUInt64(3, lastWriterEpoch_);
15519          }
15520          if (((bitField0_ & 0x00000008) == 0x00000008)) {
15521            output.writeUInt64(4, lastCommittedTxId_);
15522          }
15523          getUnknownFields().writeTo(output);
15524        }
15525    
15526        private int memoizedSerializedSize = -1;
15527        public int getSerializedSize() {
15528          int size = memoizedSerializedSize;
15529          if (size != -1) return size;
15530    
15531          size = 0;
15532          if (((bitField0_ & 0x00000001) == 0x00000001)) {
15533            size += com.google.protobuf.CodedOutputStream
15534              .computeMessageSize(1, segmentState_);
15535          }
15536          if (((bitField0_ & 0x00000002) == 0x00000002)) {
15537            size += com.google.protobuf.CodedOutputStream
15538              .computeUInt64Size(2, acceptedInEpoch_);
15539          }
15540          if (((bitField0_ & 0x00000004) == 0x00000004)) {
15541            size += com.google.protobuf.CodedOutputStream
15542              .computeUInt64Size(3, lastWriterEpoch_);
15543          }
15544          if (((bitField0_ & 0x00000008) == 0x00000008)) {
15545            size += com.google.protobuf.CodedOutputStream
15546              .computeUInt64Size(4, lastCommittedTxId_);
15547          }
15548          size += getUnknownFields().getSerializedSize();
15549          memoizedSerializedSize = size;
15550          return size;
15551        }
15552    
15553        private static final long serialVersionUID = 0L;
15554        @java.lang.Override
15555        protected java.lang.Object writeReplace()
15556            throws java.io.ObjectStreamException {
15557          return super.writeReplace();
15558        }
15559    
15560        @java.lang.Override
15561        public boolean equals(final java.lang.Object obj) {
15562          if (obj == this) {
        return true;
15564          }
15565          if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto)) {
15566            return super.equals(obj);
15567          }
15568          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto) obj;
15569    
15570          boolean result = true;
15571          result = result && (hasSegmentState() == other.hasSegmentState());
15572          if (hasSegmentState()) {
15573            result = result && getSegmentState()
15574                .equals(other.getSegmentState());
15575          }
15576          result = result && (hasAcceptedInEpoch() == other.hasAcceptedInEpoch());
15577          if (hasAcceptedInEpoch()) {
15578            result = result && (getAcceptedInEpoch()
15579                == other.getAcceptedInEpoch());
15580          }
15581          result = result && (hasLastWriterEpoch() == other.hasLastWriterEpoch());
15582          if (hasLastWriterEpoch()) {
15583            result = result && (getLastWriterEpoch()
15584                == other.getLastWriterEpoch());
15585          }
15586          result = result && (hasLastCommittedTxId() == other.hasLastCommittedTxId());
15587          if (hasLastCommittedTxId()) {
15588            result = result && (getLastCommittedTxId()
15589                == other.getLastCommittedTxId());
15590          }
15591          result = result &&
15592              getUnknownFields().equals(other.getUnknownFields());
15593          return result;
15594        }
15595    
15596        private int memoizedHashCode = 0;
15597        @java.lang.Override
15598        public int hashCode() {
15599          if (memoizedHashCode != 0) {
15600            return memoizedHashCode;
15601          }
15602          int hash = 41;
15603          hash = (19 * hash) + getDescriptorForType().hashCode();
15604          if (hasSegmentState()) {
15605            hash = (37 * hash) + SEGMENTSTATE_FIELD_NUMBER;
15606            hash = (53 * hash) + getSegmentState().hashCode();
15607          }
15608          if (hasAcceptedInEpoch()) {
15609            hash = (37 * hash) + ACCEPTEDINEPOCH_FIELD_NUMBER;
15610            hash = (53 * hash) + hashLong(getAcceptedInEpoch());
15611          }
15612          if (hasLastWriterEpoch()) {
15613            hash = (37 * hash) + LASTWRITEREPOCH_FIELD_NUMBER;
15614            hash = (53 * hash) + hashLong(getLastWriterEpoch());
15615          }
15616          if (hasLastCommittedTxId()) {
15617            hash = (37 * hash) + LASTCOMMITTEDTXID_FIELD_NUMBER;
15618            hash = (53 * hash) + hashLong(getLastCommittedTxId());
15619          }
15620          hash = (29 * hash) + getUnknownFields().hashCode();
15621          memoizedHashCode = hash;
15622          return hash;
15623        }
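
    // The hash mixes each present field's number and value using the
    // generator's fixed multipliers (19 for the descriptor, 37/53 per field,
    // 29 for unknown fields). hashLong(...) is the protobuf-java 2.5 helper
    // that folds a uint64 into an int, essentially (int) (n ^ (n >>> 32)),
    // the same mixing Long.hashCode uses.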
15624    
15625        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto parseFrom(
15626            com.google.protobuf.ByteString data)
15627            throws com.google.protobuf.InvalidProtocolBufferException {
15628          return PARSER.parseFrom(data);
15629        }
15630        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto parseFrom(
15631            com.google.protobuf.ByteString data,
15632            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
15633            throws com.google.protobuf.InvalidProtocolBufferException {
15634          return PARSER.parseFrom(data, extensionRegistry);
15635        }
15636        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto parseFrom(byte[] data)
15637            throws com.google.protobuf.InvalidProtocolBufferException {
15638          return PARSER.parseFrom(data);
15639        }
15640        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto parseFrom(
15641            byte[] data,
15642            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
15643            throws com.google.protobuf.InvalidProtocolBufferException {
15644          return PARSER.parseFrom(data, extensionRegistry);
15645        }
15646        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto parseFrom(java.io.InputStream input)
15647            throws java.io.IOException {
15648          return PARSER.parseFrom(input);
15649        }
15650        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto parseFrom(
15651            java.io.InputStream input,
15652            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
15653            throws java.io.IOException {
15654          return PARSER.parseFrom(input, extensionRegistry);
15655        }
15656        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto parseDelimitedFrom(java.io.InputStream input)
15657            throws java.io.IOException {
15658          return PARSER.parseDelimitedFrom(input);
15659        }
15660        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto parseDelimitedFrom(
15661            java.io.InputStream input,
15662            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
15663            throws java.io.IOException {
15664          return PARSER.parseDelimitedFrom(input, extensionRegistry);
15665        }
15666        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto parseFrom(
15667            com.google.protobuf.CodedInputStream input)
15668            throws java.io.IOException {
15669          return PARSER.parseFrom(input);
15670        }
15671        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto parseFrom(
15672            com.google.protobuf.CodedInputStream input,
15673            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
15674            throws java.io.IOException {
15675          return PARSER.parseFrom(input, extensionRegistry);
15676        }
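
    // Usage sketch (illustrative; resp and responseBytes are hypothetical
    // names, not part of this file): parse a response off the wire, then
    // check presence before reading optional fields, since an unset uint64
    // reads back as 0.
    //
    //   PrepareRecoveryResponseProto resp =
    //       PrepareRecoveryResponseProto.parseFrom(responseBytes);
    //   long lastWriter = resp.getLastWriterEpoch();   // required field
    //   if (resp.hasLastCommittedTxId()) {             // optional field
    //     long committed = resp.getLastCommittedTxId();
    //   }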
15677    
15678        public static Builder newBuilder() { return Builder.create(); }
15679        public Builder newBuilderForType() { return newBuilder(); }
15680        public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto prototype) {
15681          return newBuilder().mergeFrom(prototype);
15682        }
15683        public Builder toBuilder() { return newBuilder(this); }
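
    // toBuilder() supports an immutable-update idiom: copy this message into
    // a builder, change fields, and build a fresh instance. Sketch (resp is
    // a hypothetical existing message):
    //
    //   PrepareRecoveryResponseProto updated =
    //       resp.toBuilder().setLastCommittedTxId(150L).build();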
15684    
15685        @java.lang.Override
15686        protected Builder newBuilderForType(
15687            com.google.protobuf.GeneratedMessage.BuilderParent parent) {
15688          Builder builder = new Builder(parent);
15689          return builder;
15690        }
15691        /**
15692         * Protobuf type {@code hadoop.hdfs.PrepareRecoveryResponseProto}
15693         */
15694        public static final class Builder extends
15695            com.google.protobuf.GeneratedMessage.Builder<Builder>
15696           implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProtoOrBuilder {
15697          public static final com.google.protobuf.Descriptors.Descriptor
15698              getDescriptor() {
15699            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_PrepareRecoveryResponseProto_descriptor;
15700          }
15701    
15702          protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
15703              internalGetFieldAccessorTable() {
15704            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_PrepareRecoveryResponseProto_fieldAccessorTable
15705                .ensureFieldAccessorsInitialized(
15706                    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto.Builder.class);
15707          }
15708    
15709          // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto.newBuilder()
15710          private Builder() {
15711            maybeForceBuilderInitialization();
15712          }
15713    
15714          private Builder(
15715              com.google.protobuf.GeneratedMessage.BuilderParent parent) {
15716            super(parent);
15717            maybeForceBuilderInitialization();
15718          }
15719          private void maybeForceBuilderInitialization() {
15720            if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
15721              getSegmentStateFieldBuilder();
15722            }
15723          }
15724          private static Builder create() {
15725            return new Builder();
15726          }
15727    
15728          public Builder clear() {
15729            super.clear();
15730            if (segmentStateBuilder_ == null) {
15731              segmentState_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance();
15732            } else {
15733              segmentStateBuilder_.clear();
15734            }
15735            bitField0_ = (bitField0_ & ~0x00000001);
15736            acceptedInEpoch_ = 0L;
15737            bitField0_ = (bitField0_ & ~0x00000002);
15738            lastWriterEpoch_ = 0L;
15739            bitField0_ = (bitField0_ & ~0x00000004);
15740            lastCommittedTxId_ = 0L;
15741            bitField0_ = (bitField0_ & ~0x00000008);
15742            return this;
15743          }
15744    
15745          public Builder clone() {
15746            return create().mergeFrom(buildPartial());
15747          }
15748    
15749          public com.google.protobuf.Descriptors.Descriptor
15750              getDescriptorForType() {
15751            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_PrepareRecoveryResponseProto_descriptor;
15752          }
15753    
15754          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto getDefaultInstanceForType() {
15755            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto.getDefaultInstance();
15756          }
15757    
15758          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto build() {
15759            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto result = buildPartial();
15760            if (!result.isInitialized()) {
15761              throw newUninitializedMessageException(result);
15762            }
15763            return result;
15764          }
15765    
15766          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto buildPartial() {
15767            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto(this);
15768            int from_bitField0_ = bitField0_;
15769            int to_bitField0_ = 0;
15770            if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
15771              to_bitField0_ |= 0x00000001;
15772            }
15773            if (segmentStateBuilder_ == null) {
15774              result.segmentState_ = segmentState_;
15775            } else {
15776              result.segmentState_ = segmentStateBuilder_.build();
15777            }
15778            if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
15779              to_bitField0_ |= 0x00000002;
15780            }
15781            result.acceptedInEpoch_ = acceptedInEpoch_;
15782            if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
15783              to_bitField0_ |= 0x00000004;
15784            }
15785            result.lastWriterEpoch_ = lastWriterEpoch_;
15786            if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
15787              to_bitField0_ |= 0x00000008;
15788            }
15789            result.lastCommittedTxId_ = lastCommittedTxId_;
15790            result.bitField0_ = to_bitField0_;
15791            onBuilt();
15792            return result;
15793          }
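
      // buildPartial() translates the builder's staged bitField0_ into the
      // message's presence mask and, unlike build(), performs no
      // isInitialized() check, so it may return a message missing the
      // required lastWriterEpoch. clone() above depends on that to
      // round-trip partially populated state.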
15794    
15795          public Builder mergeFrom(com.google.protobuf.Message other) {
15796            if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto) {
15797              return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto)other);
15798            } else {
15799              super.mergeFrom(other);
15800              return this;
15801            }
15802          }
15803    
15804          public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto other) {
15805            if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto.getDefaultInstance()) return this;
15806            if (other.hasSegmentState()) {
15807              mergeSegmentState(other.getSegmentState());
15808            }
15809            if (other.hasAcceptedInEpoch()) {
15810              setAcceptedInEpoch(other.getAcceptedInEpoch());
15811            }
15812            if (other.hasLastWriterEpoch()) {
15813              setLastWriterEpoch(other.getLastWriterEpoch());
15814            }
15815            if (other.hasLastCommittedTxId()) {
15816              setLastCommittedTxId(other.getLastCommittedTxId());
15817            }
15818            this.mergeUnknownFields(other.getUnknownFields());
15819            return this;
15820          }
15821    
15822          public final boolean isInitialized() {
15823            if (!hasLastWriterEpoch()) {
15825              return false;
15826            }
15827            if (hasSegmentState()) {
15828              if (!getSegmentState().isInitialized()) {
15830                return false;
15831              }
15832            }
15833            return true;
15834          }
15835    
15836          public Builder mergeFrom(
15837              com.google.protobuf.CodedInputStream input,
15838              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
15839              throws java.io.IOException {
15840            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto parsedMessage = null;
15841            try {
15842              parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
15843            } catch (com.google.protobuf.InvalidProtocolBufferException e) {
15844              parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto) e.getUnfinishedMessage();
15845              throw e;
15846            } finally {
15847              if (parsedMessage != null) {
15848                mergeFrom(parsedMessage);
15849              }
15850            }
15851            return this;
15852          }
15853          private int bitField0_;
15854    
15855          // optional .hadoop.hdfs.SegmentStateProto segmentState = 1;
15856          private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto segmentState_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance();
15857          private com.google.protobuf.SingleFieldBuilder<
15858              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder> segmentStateBuilder_;
15859          /**
15860           * <code>optional .hadoop.hdfs.SegmentStateProto segmentState = 1;</code>
15861           */
15862          public boolean hasSegmentState() {
15863            return ((bitField0_ & 0x00000001) == 0x00000001);
15864          }
15865          /**
15866           * <code>optional .hadoop.hdfs.SegmentStateProto segmentState = 1;</code>
15867           */
15868          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto getSegmentState() {
15869            if (segmentStateBuilder_ == null) {
15870              return segmentState_;
15871            } else {
15872              return segmentStateBuilder_.getMessage();
15873            }
15874          }
15875          /**
15876           * <code>optional .hadoop.hdfs.SegmentStateProto segmentState = 1;</code>
15877           */
15878          public Builder setSegmentState(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto value) {
15879            if (segmentStateBuilder_ == null) {
15880              if (value == null) {
15881                throw new NullPointerException();
15882              }
15883              segmentState_ = value;
15884              onChanged();
15885            } else {
15886              segmentStateBuilder_.setMessage(value);
15887            }
15888            bitField0_ |= 0x00000001;
15889            return this;
15890          }
15891          /**
15892           * <code>optional .hadoop.hdfs.SegmentStateProto segmentState = 1;</code>
15893           */
15894          public Builder setSegmentState(
15895              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder builderForValue) {
15896            if (segmentStateBuilder_ == null) {
15897              segmentState_ = builderForValue.build();
15898              onChanged();
15899            } else {
15900              segmentStateBuilder_.setMessage(builderForValue.build());
15901            }
15902            bitField0_ |= 0x00000001;
15903            return this;
15904          }
15905          /**
15906           * <code>optional .hadoop.hdfs.SegmentStateProto segmentState = 1;</code>
15907           */
15908          public Builder mergeSegmentState(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto value) {
15909            if (segmentStateBuilder_ == null) {
15910              if (((bitField0_ & 0x00000001) == 0x00000001) &&
15911                  segmentState_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance()) {
15912                segmentState_ =
15913                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.newBuilder(segmentState_).mergeFrom(value).buildPartial();
15914              } else {
15915                segmentState_ = value;
15916              }
15917              onChanged();
15918            } else {
15919              segmentStateBuilder_.mergeFrom(value);
15920            }
15921            bitField0_ |= 0x00000001;
15922            return this;
15923          }
15924          /**
15925           * <code>optional .hadoop.hdfs.SegmentStateProto segmentState = 1;</code>
15926           */
15927          public Builder clearSegmentState() {
15928            if (segmentStateBuilder_ == null) {
15929              segmentState_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance();
15930              onChanged();
15931            } else {
15932              segmentStateBuilder_.clear();
15933            }
15934            bitField0_ = (bitField0_ & ~0x00000001);
15935            return this;
15936          }
15937          /**
15938           * <code>optional .hadoop.hdfs.SegmentStateProto segmentState = 1;</code>
15939           */
15940          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder getSegmentStateBuilder() {
15941            bitField0_ |= 0x00000001;
15942            onChanged();
15943            return getSegmentStateFieldBuilder().getBuilder();
15944          }
15945          /**
15946           * <code>optional .hadoop.hdfs.SegmentStateProto segmentState = 1;</code>
15947           */
15948          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder getSegmentStateOrBuilder() {
15949            if (segmentStateBuilder_ != null) {
15950              return segmentStateBuilder_.getMessageOrBuilder();
15951            } else {
15952              return segmentState_;
15953            }
15954          }
15955          /**
15956           * <code>optional .hadoop.hdfs.SegmentStateProto segmentState = 1;</code>
15957           */
15958          private com.google.protobuf.SingleFieldBuilder<
15959              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder> 
15960              getSegmentStateFieldBuilder() {
15961            if (segmentStateBuilder_ == null) {
15962              segmentStateBuilder_ = new com.google.protobuf.SingleFieldBuilder<
15963                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder>(
15964                      segmentState_,
15965                      getParentForChildren(),
15966                      isClean());
15967              segmentState_ = null;
15968            }
15969            return segmentStateBuilder_;
15970          }
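
      // The nested-message field lives either as a plain message
      // (segmentState_) or, after the first call to getSegmentStateBuilder(),
      // inside a lazily created SingleFieldBuilder, after which edits made
      // through the child builder are reflected in this builder. Sketch
      // (hedged; assumes SegmentStateProto's startTxId field declared
      // elsewhere in this file):
      //
      //   PrepareRecoveryResponseProto.Builder b =
      //       PrepareRecoveryResponseProto.newBuilder();
      //   b.getSegmentStateBuilder().setStartTxId(1L);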
15971    
15972          // optional uint64 acceptedInEpoch = 2;
15973          private long acceptedInEpoch_ ;
15974          /**
15975           * <code>optional uint64 acceptedInEpoch = 2;</code>
15976           */
15977          public boolean hasAcceptedInEpoch() {
15978            return ((bitField0_ & 0x00000002) == 0x00000002);
15979          }
15980          /**
15981           * <code>optional uint64 acceptedInEpoch = 2;</code>
15982           */
15983          public long getAcceptedInEpoch() {
15984            return acceptedInEpoch_;
15985          }
15986          /**
15987           * <code>optional uint64 acceptedInEpoch = 2;</code>
15988           */
15989          public Builder setAcceptedInEpoch(long value) {
15990            bitField0_ |= 0x00000002;
15991            acceptedInEpoch_ = value;
15992            onChanged();
15993            return this;
15994          }
15995          /**
15996           * <code>optional uint64 acceptedInEpoch = 2;</code>
15997           */
15998          public Builder clearAcceptedInEpoch() {
15999            bitField0_ = (bitField0_ & ~0x00000002);
16000            acceptedInEpoch_ = 0L;
16001            onChanged();
16002            return this;
16003          }
16004    
16005          // required uint64 lastWriterEpoch = 3;
16006          private long lastWriterEpoch_ ;
16007          /**
16008           * <code>required uint64 lastWriterEpoch = 3;</code>
16009           */
16010          public boolean hasLastWriterEpoch() {
16011            return ((bitField0_ & 0x00000004) == 0x00000004);
16012          }
16013          /**
16014           * <code>required uint64 lastWriterEpoch = 3;</code>
16015           */
16016          public long getLastWriterEpoch() {
16017            return lastWriterEpoch_;
16018          }
16019          /**
16020           * <code>required uint64 lastWriterEpoch = 3;</code>
16021           */
16022          public Builder setLastWriterEpoch(long value) {
16023            bitField0_ |= 0x00000004;
16024            lastWriterEpoch_ = value;
16025            onChanged();
16026            return this;
16027          }
16028          /**
16029           * <code>required uint64 lastWriterEpoch = 3;</code>
16030           */
16031          public Builder clearLastWriterEpoch() {
16032            bitField0_ = (bitField0_ & ~0x00000004);
16033            lastWriterEpoch_ = 0L;
16034            onChanged();
16035            return this;
16036          }
16037    
16038          // optional uint64 lastCommittedTxId = 4;
16039          private long lastCommittedTxId_ ;
16040          /**
16041           * <code>optional uint64 lastCommittedTxId = 4;</code>
16042           *
16043           * <pre>
16044           * The highest committed txid that this logger has ever seen.
16045           * This may be higher than the data it actually has, in the case
16046           * that it was lagging before the old writer crashed.
16047           * </pre>
16048           */
16049          public boolean hasLastCommittedTxId() {
16050            return ((bitField0_ & 0x00000008) == 0x00000008);
16051          }
16052          /**
16053           * <code>optional uint64 lastCommittedTxId = 4;</code>
16054           *
16055           * <pre>
16056           * The highest committed txid that this logger has ever seen.
16057           * This may be higher than the data it actually has, in the case
16058           * that it was lagging before the old writer crashed.
16059           * </pre>
16060           */
16061          public long getLastCommittedTxId() {
16062            return lastCommittedTxId_;
16063          }
16064          /**
16065           * <code>optional uint64 lastCommittedTxId = 4;</code>
16066           *
16067           * <pre>
16068           * The highest committed txid that this logger has ever seen.
16069           * This may be higher than the data it actually has, in the case
16070           * that it was lagging before the old writer crashed.
16071           * </pre>
16072           */
16073          public Builder setLastCommittedTxId(long value) {
16074            bitField0_ |= 0x00000008;
16075            lastCommittedTxId_ = value;
16076            onChanged();
16077            return this;
16078          }
16079          /**
16080           * <code>optional uint64 lastCommittedTxId = 4;</code>
16081           *
16082           * <pre>
16083           * The highest committed txid that this logger has ever seen.
16084           * This may be higher than the data it actually has, in the case
16085           * that it was lagging before the old writer crashed.
16086           * </pre>
16087           */
16088          public Builder clearLastCommittedTxId() {
16089            bitField0_ = (bitField0_ & ~0x00000008);
16090            lastCommittedTxId_ = 0L;
16091            onChanged();
16092            return this;
16093          }
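
      // End-to-end builder sketch (illustrative values): lastWriterEpoch is
      // the only required field in this message, so build() throws an
      // UninitializedMessageException if it was never set.
      //
      //   PrepareRecoveryResponseProto resp = PrepareRecoveryResponseProto
      //       .newBuilder()
      //       .setLastWriterEpoch(3L)        // required
      //       .setAcceptedInEpoch(4L)        // optional
      //       .setLastCommittedTxId(150L)    // optional
      //       .build();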
16094    
16095          // @@protoc_insertion_point(builder_scope:hadoop.hdfs.PrepareRecoveryResponseProto)
16096        }
16097    
16098        static {
16099          defaultInstance = new PrepareRecoveryResponseProto(true);
16100          defaultInstance.initFields();
16101        }
16102    
16103        // @@protoc_insertion_point(class_scope:hadoop.hdfs.PrepareRecoveryResponseProto)
16104      }
16105    
16106      public interface AcceptRecoveryRequestProtoOrBuilder
16107          extends com.google.protobuf.MessageOrBuilder {
16108    
16109        // required .hadoop.hdfs.RequestInfoProto reqInfo = 1;
16110        /**
16111         * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
16112         */
16113        boolean hasReqInfo();
16114        /**
16115         * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
16116         */
16117        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo();
16118        /**
16119         * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
16120         */
16121        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder();
16122    
16123        // required .hadoop.hdfs.SegmentStateProto stateToAccept = 2;
16124        /**
16125         * <code>required .hadoop.hdfs.SegmentStateProto stateToAccept = 2;</code>
16126         *
16127         * <pre>
16128         ** Details on the segment to recover 
16129         * </pre>
16130         */
16131        boolean hasStateToAccept();
16132        /**
16133         * <code>required .hadoop.hdfs.SegmentStateProto stateToAccept = 2;</code>
16134         *
16135         * <pre>
16136         ** Details on the segment to recover 
16137         * </pre>
16138         */
16139        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto getStateToAccept();
16140        /**
16141         * <code>required .hadoop.hdfs.SegmentStateProto stateToAccept = 2;</code>
16142         *
16143         * <pre>
16144         ** Details on the segment to recover 
16145         * </pre>
16146         */
16147        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder getStateToAcceptOrBuilder();
16148    
16149        // required string fromURL = 3;
16150        /**
16151         * <code>required string fromURL = 3;</code>
16152         *
16153         * <pre>
16154         ** The URL from which the log may be copied 
16155         * </pre>
16156         */
16157        boolean hasFromURL();
16158        /**
16159         * <code>required string fromURL = 3;</code>
16160         *
16161         * <pre>
16162         ** The URL from which the log may be copied 
16163         * </pre>
16164         */
16165        java.lang.String getFromURL();
16166        /**
16167         * <code>required string fromURL = 3;</code>
16168         *
16169         * <pre>
16170         ** The URL from which the log may be copied 
16171         * </pre>
16172         */
16173        com.google.protobuf.ByteString
16174            getFromURLBytes();
16175      }
16176      /**
16177       * Protobuf type {@code hadoop.hdfs.AcceptRecoveryRequestProto}
16178       *
16179       * <pre>
16180       **
16181       * acceptRecovery()
16182       * </pre>
16183       */
16184      public static final class AcceptRecoveryRequestProto extends
16185          com.google.protobuf.GeneratedMessage
16186          implements AcceptRecoveryRequestProtoOrBuilder {
16187        // Use AcceptRecoveryRequestProto.newBuilder() to construct.
16188        private AcceptRecoveryRequestProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
16189          super(builder);
16190          this.unknownFields = builder.getUnknownFields();
16191        }
16192        private AcceptRecoveryRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
16193    
16194        private static final AcceptRecoveryRequestProto defaultInstance;
16195        public static AcceptRecoveryRequestProto getDefaultInstance() {
16196          return defaultInstance;
16197        }
16198    
16199        public AcceptRecoveryRequestProto getDefaultInstanceForType() {
16200          return defaultInstance;
16201        }
16202    
16203        private final com.google.protobuf.UnknownFieldSet unknownFields;
16204        @java.lang.Override
16205        public final com.google.protobuf.UnknownFieldSet
16206            getUnknownFields() {
16207          return this.unknownFields;
16208        }
16209        private AcceptRecoveryRequestProto(
16210            com.google.protobuf.CodedInputStream input,
16211            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
16212            throws com.google.protobuf.InvalidProtocolBufferException {
16213          initFields();
16214          int mutable_bitField0_ = 0;
16215          com.google.protobuf.UnknownFieldSet.Builder unknownFields =
16216              com.google.protobuf.UnknownFieldSet.newBuilder();
16217          try {
16218            boolean done = false;
16219            while (!done) {
16220              int tag = input.readTag();
16221              switch (tag) {
16222                case 0:
16223                  done = true;
16224                  break;
16225                default: {
16226                  if (!parseUnknownField(input, unknownFields,
16227                                         extensionRegistry, tag)) {
16228                    done = true;
16229                  }
16230                  break;
16231                }
16232                case 10: {
16233                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder subBuilder = null;
16234                  if (((bitField0_ & 0x00000001) == 0x00000001)) {
16235                    subBuilder = reqInfo_.toBuilder();
16236                  }
16237                  reqInfo_ = input.readMessage(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.PARSER, extensionRegistry);
16238                  if (subBuilder != null) {
16239                    subBuilder.mergeFrom(reqInfo_);
16240                    reqInfo_ = subBuilder.buildPartial();
16241                  }
16242                  bitField0_ |= 0x00000001;
16243                  break;
16244                }
16245                case 18: {
16246                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder subBuilder = null;
16247                  if (((bitField0_ & 0x00000002) == 0x00000002)) {
16248                    subBuilder = stateToAccept_.toBuilder();
16249                  }
16250                  stateToAccept_ = input.readMessage(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.PARSER, extensionRegistry);
16251                  if (subBuilder != null) {
16252                    subBuilder.mergeFrom(stateToAccept_);
16253                    stateToAccept_ = subBuilder.buildPartial();
16254                  }
16255                  bitField0_ |= 0x00000002;
16256                  break;
16257                }
16258                case 26: {
16259                  bitField0_ |= 0x00000004;
16260                  fromURL_ = input.readBytes();
16261                  break;
16262                }
16263              }
16264            }
16265          } catch (com.google.protobuf.InvalidProtocolBufferException e) {
16266            throw e.setUnfinishedMessage(this);
16267          } catch (java.io.IOException e) {
16268            throw new com.google.protobuf.InvalidProtocolBufferException(
16269                e.getMessage()).setUnfinishedMessage(this);
16270          } finally {
16271            this.unknownFields = unknownFields.build();
16272            makeExtensionsImmutable();
16273          }
16274        }
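    // Wire-format note: each case label in the loop above is a full tag,
    // (field_number << 3) | wire_type. Tags 10, 18, and 26 are fields 1-3
    // with wire type 2 (length-delimited), covering the two embedded
    // messages and the string; tag 0 means end of input, and unrecognized
    // tags land in unknownFields rather than being dropped.
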
16275        public static final com.google.protobuf.Descriptors.Descriptor
16276            getDescriptor() {
16277          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_AcceptRecoveryRequestProto_descriptor;
16278        }
16279    
16280        protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
16281            internalGetFieldAccessorTable() {
16282          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_AcceptRecoveryRequestProto_fieldAccessorTable
16283              .ensureFieldAccessorsInitialized(
16284                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto.Builder.class);
16285        }
16286    
16287        public static com.google.protobuf.Parser<AcceptRecoveryRequestProto> PARSER =
16288            new com.google.protobuf.AbstractParser<AcceptRecoveryRequestProto>() {
16289          public AcceptRecoveryRequestProto parsePartialFrom(
16290              com.google.protobuf.CodedInputStream input,
16291              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
16292              throws com.google.protobuf.InvalidProtocolBufferException {
16293            return new AcceptRecoveryRequestProto(input, extensionRegistry);
16294          }
16295        };
16296    
16297        @java.lang.Override
16298        public com.google.protobuf.Parser<AcceptRecoveryRequestProto> getParserForType() {
16299          return PARSER;
16300        }
16301    
16302        private int bitField0_;
16303        // required .hadoop.hdfs.RequestInfoProto reqInfo = 1;
16304        public static final int REQINFO_FIELD_NUMBER = 1;
16305        private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto reqInfo_;
16306        /**
16307         * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
16308         */
16309        public boolean hasReqInfo() {
16310          return ((bitField0_ & 0x00000001) == 0x00000001);
16311        }
16312        /**
16313         * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
16314         */
16315        public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo() {
16316          return reqInfo_;
16317        }
16318        /**
16319         * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
16320         */
16321        public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder() {
16322          return reqInfo_;
16323        }
16324    
16325        // required .hadoop.hdfs.SegmentStateProto stateToAccept = 2;
16326        public static final int STATETOACCEPT_FIELD_NUMBER = 2;
16327        private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto stateToAccept_;
16328        /**
16329         * <code>required .hadoop.hdfs.SegmentStateProto stateToAccept = 2;</code>
16330         *
16331         * <pre>
16332         ** Details on the segment to recover 
16333         * </pre>
16334         */
16335        public boolean hasStateToAccept() {
16336          return ((bitField0_ & 0x00000002) == 0x00000002);
16337        }
16338        /**
16339         * <code>required .hadoop.hdfs.SegmentStateProto stateToAccept = 2;</code>
16340         *
16341         * <pre>
16342         ** Details on the segment to recover 
16343         * </pre>
16344         */
16345        public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto getStateToAccept() {
16346          return stateToAccept_;
16347        }
16348        /**
16349         * <code>required .hadoop.hdfs.SegmentStateProto stateToAccept = 2;</code>
16350         *
16351         * <pre>
16352         ** Details on the segment to recover 
16353         * </pre>
16354         */
16355        public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder getStateToAcceptOrBuilder() {
16356          return stateToAccept_;
16357        }
16358    
16359        // required string fromURL = 3;
16360        public static final int FROMURL_FIELD_NUMBER = 3;
16361        private java.lang.Object fromURL_;
16362        /**
16363         * <code>required string fromURL = 3;</code>
16364         *
16365         * <pre>
16366         ** The URL from which the log may be copied 
16367         * </pre>
16368         */
16369        public boolean hasFromURL() {
16370          return ((bitField0_ & 0x00000004) == 0x00000004);
16371        }
16372        /**
16373         * <code>required string fromURL = 3;</code>
16374         *
16375         * <pre>
16376         ** The URL from which the log may be copied 
16377         * </pre>
16378         */
16379        public java.lang.String getFromURL() {
16380          java.lang.Object ref = fromURL_;
16381          if (ref instanceof java.lang.String) {
16382            return (java.lang.String) ref;
16383          } else {
16384            com.google.protobuf.ByteString bs = 
16385                (com.google.protobuf.ByteString) ref;
16386            java.lang.String s = bs.toStringUtf8();
16387            if (bs.isValidUtf8()) {
16388              fromURL_ = s;
16389            }
16390            return s;
16391          }
16392        }
16393        /**
16394         * <code>required string fromURL = 3;</code>
16395         *
16396         * <pre>
16397         ** The URL from which the log may be copied 
16398         * </pre>
16399         */
16400        public com.google.protobuf.ByteString
16401            getFromURLBytes() {
16402          java.lang.Object ref = fromURL_;
16403          if (ref instanceof java.lang.String) {
16404            com.google.protobuf.ByteString b = 
16405                com.google.protobuf.ByteString.copyFromUtf8(
16406                    (java.lang.String) ref);
16407            fromURL_ = b;
16408            return b;
16409          } else {
16410            return (com.google.protobuf.ByteString) ref;
16411          }
16412        }
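
    // String fields keep a dual representation: fromURL_ starts as the
    // ByteString read off the wire and is swapped for a java.lang.String the
    // first time getFromURL() decodes valid UTF-8; getFromURLBytes() caches
    // the reverse conversion, so repeated accessors avoid re-encoding.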
16413    
16414        private void initFields() {
16415          reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
16416          stateToAccept_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance();
16417          fromURL_ = "";
16418        }
16419        private byte memoizedIsInitialized = -1;
16420        public final boolean isInitialized() {
16421          byte isInitialized = memoizedIsInitialized;
16422          if (isInitialized != -1) return isInitialized == 1;
16423    
16424          if (!hasReqInfo()) {
16425            memoizedIsInitialized = 0;
16426            return false;
16427          }
16428          if (!hasStateToAccept()) {
16429            memoizedIsInitialized = 0;
16430            return false;
16431          }
16432          if (!hasFromURL()) {
16433            memoizedIsInitialized = 0;
16434            return false;
16435          }
16436          if (!getReqInfo().isInitialized()) {
16437            memoizedIsInitialized = 0;
16438            return false;
16439          }
16440          if (!getStateToAccept().isInitialized()) {
16441            memoizedIsInitialized = 0;
16442            return false;
16443          }
16444          memoizedIsInitialized = 1;
16445          return true;
16446        }
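
    // isInitialized() verifies all three required fields, then recurses into
    // the embedded messages; the verdict is memoized in a byte (-1 unknown,
    // 0 false, 1 true) since the message can never change after construction.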
16447    
16448        public void writeTo(com.google.protobuf.CodedOutputStream output)
16449                            throws java.io.IOException {
16450          getSerializedSize();
16451          if (((bitField0_ & 0x00000001) == 0x00000001)) {
16452            output.writeMessage(1, reqInfo_);
16453          }
16454          if (((bitField0_ & 0x00000002) == 0x00000002)) {
16455            output.writeMessage(2, stateToAccept_);
16456          }
16457          if (((bitField0_ & 0x00000004) == 0x00000004)) {
16458            output.writeBytes(3, getFromURLBytes());
16459          }
16460          getUnknownFields().writeTo(output);
16461        }
16462    
16463        private int memoizedSerializedSize = -1;
16464        public int getSerializedSize() {
16465          int size = memoizedSerializedSize;
16466          if (size != -1) return size;
16467    
16468          size = 0;
16469          if (((bitField0_ & 0x00000001) == 0x00000001)) {
16470            size += com.google.protobuf.CodedOutputStream
16471              .computeMessageSize(1, reqInfo_);
16472          }
16473          if (((bitField0_ & 0x00000002) == 0x00000002)) {
16474            size += com.google.protobuf.CodedOutputStream
16475              .computeMessageSize(2, stateToAccept_);
16476          }
16477          if (((bitField0_ & 0x00000004) == 0x00000004)) {
16478            size += com.google.protobuf.CodedOutputStream
16479              .computeBytesSize(3, getFromURLBytes());
16480          }
16481          size += getUnknownFields().getSerializedSize();
16482          memoizedSerializedSize = size;
16483          return size;
16484        }
16485    
16486        private static final long serialVersionUID = 0L;
16487        @java.lang.Override
16488        protected java.lang.Object writeReplace()
16489            throws java.io.ObjectStreamException {
16490          return super.writeReplace();
16491        }
16492    
16493        @java.lang.Override
16494        public boolean equals(final java.lang.Object obj) {
16495          if (obj == this) {
16496            return true;
16497          }
16498          if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto)) {
16499            return super.equals(obj);
16500          }
16501          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto) obj;
16502    
16503          boolean result = true;
16504          result = result && (hasReqInfo() == other.hasReqInfo());
16505          if (hasReqInfo()) {
16506            result = result && getReqInfo()
16507                .equals(other.getReqInfo());
16508          }
16509          result = result && (hasStateToAccept() == other.hasStateToAccept());
16510          if (hasStateToAccept()) {
16511            result = result && getStateToAccept()
16512                .equals(other.getStateToAccept());
16513          }
16514          result = result && (hasFromURL() == other.hasFromURL());
16515          if (hasFromURL()) {
16516            result = result && getFromURL()
16517                .equals(other.getFromURL());
16518          }
16519          result = result &&
16520              getUnknownFields().equals(other.getUnknownFields());
16521          return result;
16522        }
16523    
16524        private int memoizedHashCode = 0;
16525        @java.lang.Override
16526        public int hashCode() {
16527          if (memoizedHashCode != 0) {
16528            return memoizedHashCode;
16529          }
16530          int hash = 41;
16531          hash = (19 * hash) + getDescriptorForType().hashCode();
16532          if (hasReqInfo()) {
16533            hash = (37 * hash) + REQINFO_FIELD_NUMBER;
16534            hash = (53 * hash) + getReqInfo().hashCode();
16535          }
16536          if (hasStateToAccept()) {
16537            hash = (37 * hash) + STATETOACCEPT_FIELD_NUMBER;
16538            hash = (53 * hash) + getStateToAccept().hashCode();
16539          }
16540          if (hasFromURL()) {
16541            hash = (37 * hash) + FROMURL_FIELD_NUMBER;
16542            hash = (53 * hash) + getFromURL().hashCode();
16543          }
16544          hash = (29 * hash) + getUnknownFields().hashCode();
16545          memoizedHashCode = hash;
16546          return hash;
16547        }
16548    
16549        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto parseFrom(
16550            com.google.protobuf.ByteString data)
16551            throws com.google.protobuf.InvalidProtocolBufferException {
16552          return PARSER.parseFrom(data);
16553        }
16554        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto parseFrom(
16555            com.google.protobuf.ByteString data,
16556            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
16557            throws com.google.protobuf.InvalidProtocolBufferException {
16558          return PARSER.parseFrom(data, extensionRegistry);
16559        }
16560        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto parseFrom(byte[] data)
16561            throws com.google.protobuf.InvalidProtocolBufferException {
16562          return PARSER.parseFrom(data);
16563        }
16564        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto parseFrom(
16565            byte[] data,
16566            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
16567            throws com.google.protobuf.InvalidProtocolBufferException {
16568          return PARSER.parseFrom(data, extensionRegistry);
16569        }
16570        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto parseFrom(java.io.InputStream input)
16571            throws java.io.IOException {
16572          return PARSER.parseFrom(input);
16573        }
16574        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto parseFrom(
16575            java.io.InputStream input,
16576            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
16577            throws java.io.IOException {
16578          return PARSER.parseFrom(input, extensionRegistry);
16579        }
16580        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto parseDelimitedFrom(java.io.InputStream input)
16581            throws java.io.IOException {
16582          return PARSER.parseDelimitedFrom(input);
16583        }
16584        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto parseDelimitedFrom(
16585            java.io.InputStream input,
16586            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
16587            throws java.io.IOException {
16588          return PARSER.parseDelimitedFrom(input, extensionRegistry);
16589        }
16590        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto parseFrom(
16591            com.google.protobuf.CodedInputStream input)
16592            throws java.io.IOException {
16593          return PARSER.parseFrom(input);
16594        }
16595        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto parseFrom(
16596            com.google.protobuf.CodedInputStream input,
16597            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
16598            throws java.io.IOException {
16599          return PARSER.parseFrom(input, extensionRegistry);
16600        }
16601    
16602        public static Builder newBuilder() { return Builder.create(); }
16603        public Builder newBuilderForType() { return newBuilder(); }
16604        public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto prototype) {
16605          return newBuilder().mergeFrom(prototype);
16606        }
16607        public Builder toBuilder() { return newBuilder(this); }
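
    // Builder sketch (hedged: reqInfo, segmentState, and fromURL are
    // hypothetical placeholders, not values defined in this file). All three
    // fields are required here, so build() validates that each is set:
    //
    //   AcceptRecoveryRequestProto req = AcceptRecoveryRequestProto
    //       .newBuilder()
    //       .setReqInfo(reqInfo)               // RequestInfoProto
    //       .setStateToAccept(segmentState)    // SegmentStateProto
    //       .setFromURL(fromURL)               // URL the log can be copied from
    //       .build();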
16608    
16609        @java.lang.Override
16610        protected Builder newBuilderForType(
16611            com.google.protobuf.GeneratedMessage.BuilderParent parent) {
16612          Builder builder = new Builder(parent);
16613          return builder;
16614        }
16615        /**
16616         * Protobuf type {@code hadoop.hdfs.AcceptRecoveryRequestProto}
16617         *
16618         * <pre>
16619         **
16620         * acceptRecovery()
16621         * </pre>
16622         */
16623        public static final class Builder extends
16624            com.google.protobuf.GeneratedMessage.Builder<Builder>
16625           implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProtoOrBuilder {
16626          public static final com.google.protobuf.Descriptors.Descriptor
16627              getDescriptor() {
16628            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_AcceptRecoveryRequestProto_descriptor;
16629          }
16630    
16631          protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
16632              internalGetFieldAccessorTable() {
16633            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_AcceptRecoveryRequestProto_fieldAccessorTable
16634                .ensureFieldAccessorsInitialized(
16635                    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto.Builder.class);
16636          }
16637    
16638          // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto.newBuilder()
16639          private Builder() {
16640            maybeForceBuilderInitialization();
16641          }
16642    
16643          private Builder(
16644              com.google.protobuf.GeneratedMessage.BuilderParent parent) {
16645            super(parent);
16646            maybeForceBuilderInitialization();
16647          }
16648          private void maybeForceBuilderInitialization() {
16649            if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
16650              getReqInfoFieldBuilder();
16651              getStateToAcceptFieldBuilder();
16652            }
16653          }
16654          private static Builder create() {
16655            return new Builder();
16656          }
16657    
16658          public Builder clear() {
16659            super.clear();
16660            if (reqInfoBuilder_ == null) {
16661              reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
16662            } else {
16663              reqInfoBuilder_.clear();
16664            }
16665            bitField0_ = (bitField0_ & ~0x00000001);
16666            if (stateToAcceptBuilder_ == null) {
16667              stateToAccept_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance();
16668            } else {
16669              stateToAcceptBuilder_.clear();
16670            }
16671            bitField0_ = (bitField0_ & ~0x00000002);
16672            fromURL_ = "";
16673            bitField0_ = (bitField0_ & ~0x00000004);
16674            return this;
16675          }
16676    
16677          public Builder clone() {
16678            return create().mergeFrom(buildPartial());
16679          }
16680    
16681          public com.google.protobuf.Descriptors.Descriptor
16682              getDescriptorForType() {
16683            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_AcceptRecoveryRequestProto_descriptor;
16684          }
16685    
16686          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto getDefaultInstanceForType() {
16687            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto.getDefaultInstance();
16688          }
16689    
16690          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto build() {
16691            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto result = buildPartial();
16692            if (!result.isInitialized()) {
16693              throw newUninitializedMessageException(result);
16694            }
16695            return result;
16696          }
16697    
16698          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto buildPartial() {
16699            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto(this);
16700            int from_bitField0_ = bitField0_;
16701            int to_bitField0_ = 0;
16702            if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
16703              to_bitField0_ |= 0x00000001;
16704            }
16705            if (reqInfoBuilder_ == null) {
16706              result.reqInfo_ = reqInfo_;
16707            } else {
16708              result.reqInfo_ = reqInfoBuilder_.build();
16709            }
16710            if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
16711              to_bitField0_ |= 0x00000002;
16712            }
16713            if (stateToAcceptBuilder_ == null) {
16714              result.stateToAccept_ = stateToAccept_;
16715            } else {
16716              result.stateToAccept_ = stateToAcceptBuilder_.build();
16717            }
16718            if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
16719              to_bitField0_ |= 0x00000004;
16720            }
16721            result.fromURL_ = fromURL_;
16722            result.bitField0_ = to_bitField0_;
16723            onBuilt();
16724            return result;
16725          }
16726    
16727          public Builder mergeFrom(com.google.protobuf.Message other) {
16728            if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto) {
16729              return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto)other);
16730            } else {
16731              super.mergeFrom(other);
16732              return this;
16733            }
16734          }
16735    
16736          public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto other) {
16737            if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto.getDefaultInstance()) return this;
16738            if (other.hasReqInfo()) {
16739              mergeReqInfo(other.getReqInfo());
16740            }
16741            if (other.hasStateToAccept()) {
16742              mergeStateToAccept(other.getStateToAccept());
16743            }
16744            if (other.hasFromURL()) {
16745              bitField0_ |= 0x00000004;
16746              fromURL_ = other.fromURL_;
16747              onChanged();
16748            }
16749            this.mergeUnknownFields(other.getUnknownFields());
16750            return this;
16751          }
16752    
16753          public final boolean isInitialized() {
16754            if (!hasReqInfo()) {
16756              return false;
16757            }
16758            if (!hasStateToAccept()) {
16760              return false;
16761            }
16762            if (!hasFromURL()) {
16764              return false;
16765            }
16766            if (!getReqInfo().isInitialized()) {
16768              return false;
16769            }
16770            if (!getStateToAccept().isInitialized()) {
16772              return false;
16773            }
16774            return true;
16775          }
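
      // Unlike the message-side isInitialized(), this builder-side check is
      // recomputed on every call: builders are mutable, so the result cannot
      // be memoized. The rules mirror the message's: required fields present
      // and sub-messages themselves initialized.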
16776    
16777          public Builder mergeFrom(
16778              com.google.protobuf.CodedInputStream input,
16779              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
16780              throws java.io.IOException {
16781            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto parsedMessage = null;
16782            try {
16783              parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
16784            } catch (com.google.protobuf.InvalidProtocolBufferException e) {
16785              parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto) e.getUnfinishedMessage();
16786              throw e;
16787            } finally {
16788              if (parsedMessage != null) {
16789                mergeFrom(parsedMessage);
16790              }
16791            }
16792            return this;
16793          }
16794          private int bitField0_;
16795    
16796          // required .hadoop.hdfs.RequestInfoProto reqInfo = 1;
16797          private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
16798          private com.google.protobuf.SingleFieldBuilder<
16799              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder> reqInfoBuilder_;
16800          /**
16801           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
16802           */
16803          public boolean hasReqInfo() {
16804            return ((bitField0_ & 0x00000001) == 0x00000001);
16805          }
16806          /**
16807           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
16808           */
16809          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo() {
16810            if (reqInfoBuilder_ == null) {
16811              return reqInfo_;
16812            } else {
16813              return reqInfoBuilder_.getMessage();
16814            }
16815          }
16816          /**
16817           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
16818           */
16819          public Builder setReqInfo(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto value) {
16820            if (reqInfoBuilder_ == null) {
16821              if (value == null) {
16822                throw new NullPointerException();
16823              }
16824              reqInfo_ = value;
16825              onChanged();
16826            } else {
16827              reqInfoBuilder_.setMessage(value);
16828            }
16829            bitField0_ |= 0x00000001;
16830            return this;
16831          }
16832          /**
16833           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
16834           */
16835          public Builder setReqInfo(
16836              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder builderForValue) {
16837            if (reqInfoBuilder_ == null) {
16838              reqInfo_ = builderForValue.build();
16839              onChanged();
16840            } else {
16841              reqInfoBuilder_.setMessage(builderForValue.build());
16842            }
16843            bitField0_ |= 0x00000001;
16844            return this;
16845          }
16846          /**
16847           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
16848           */
16849          public Builder mergeReqInfo(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto value) {
16850            if (reqInfoBuilder_ == null) {
16851              if (((bitField0_ & 0x00000001) == 0x00000001) &&
16852                  reqInfo_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance()) {
16853                reqInfo_ =
16854                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.newBuilder(reqInfo_).mergeFrom(value).buildPartial();
16855              } else {
16856                reqInfo_ = value;
16857              }
16858              onChanged();
16859            } else {
16860              reqInfoBuilder_.mergeFrom(value);
16861            }
16862            bitField0_ |= 0x00000001;
16863            return this;
16864          }
16865          /**
16866           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
16867           */
16868          public Builder clearReqInfo() {
16869            if (reqInfoBuilder_ == null) {
16870              reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
16871              onChanged();
16872            } else {
16873              reqInfoBuilder_.clear();
16874            }
16875            bitField0_ = (bitField0_ & ~0x00000001);
16876            return this;
16877          }
16878          /**
16879           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
16880           */
16881          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder getReqInfoBuilder() {
16882            bitField0_ |= 0x00000001;
16883            onChanged();
16884            return getReqInfoFieldBuilder().getBuilder();
16885          }
16886          /**
16887           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
16888           */
16889          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder() {
16890            if (reqInfoBuilder_ != null) {
16891              return reqInfoBuilder_.getMessageOrBuilder();
16892            } else {
16893              return reqInfo_;
16894            }
16895          }
16896          /**
16897           * <code>required .hadoop.hdfs.RequestInfoProto reqInfo = 1;</code>
16898           */
16899          private com.google.protobuf.SingleFieldBuilder<
16900              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder> 
16901              getReqInfoFieldBuilder() {
16902            if (reqInfoBuilder_ == null) {
16903              reqInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder<
16904                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder>(
16905                      reqInfo_,
16906                      getParentForChildren(),
16907                      isClean());
16908              reqInfo_ = null;
16909            }
16910            return reqInfoBuilder_;
16911          }
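               // The SingleFieldBuilder above is created lazily: until
               // getReqInfoBuilder() or getReqInfoFieldBuilder() forces it, the
               // value lives in the plain reqInfo_ message; afterwards reqInfo_ is
               // nulled and every read and write goes through reqInfoBuilder_,
               // which also notifies this builder when the nested builder changes.
               // stateToAccept below follows the identical pattern.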
16912    
16913          // required .hadoop.hdfs.SegmentStateProto stateToAccept = 2;
16914          private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto stateToAccept_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance();
16915          private com.google.protobuf.SingleFieldBuilder<
16916              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder> stateToAcceptBuilder_;
16917          /**
16918           * <code>required .hadoop.hdfs.SegmentStateProto stateToAccept = 2;</code>
16919           *
16920           * <pre>
16921           ** Details on the segment to recover 
16922           * </pre>
16923           */
16924          public boolean hasStateToAccept() {
16925            return ((bitField0_ & 0x00000002) == 0x00000002);
16926          }
16927          /**
16928           * <code>required .hadoop.hdfs.SegmentStateProto stateToAccept = 2;</code>
16929           *
16930           * <pre>
16931           ** Details on the segment to recover 
16932           * </pre>
16933           */
16934          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto getStateToAccept() {
16935            if (stateToAcceptBuilder_ == null) {
16936              return stateToAccept_;
16937            } else {
16938              return stateToAcceptBuilder_.getMessage();
16939            }
16940          }
16941          /**
16942           * <code>required .hadoop.hdfs.SegmentStateProto stateToAccept = 2;</code>
16943           *
16944           * <pre>
16945           ** Details on the segment to recover 
16946           * </pre>
16947           */
16948          public Builder setStateToAccept(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto value) {
16949            if (stateToAcceptBuilder_ == null) {
16950              if (value == null) {
16951                throw new NullPointerException();
16952              }
16953              stateToAccept_ = value;
16954              onChanged();
16955            } else {
16956              stateToAcceptBuilder_.setMessage(value);
16957            }
16958            bitField0_ |= 0x00000002;
16959            return this;
16960          }
16961          /**
16962           * <code>required .hadoop.hdfs.SegmentStateProto stateToAccept = 2;</code>
16963           *
16964           * <pre>
16965           ** Details on the segment to recover 
16966           * </pre>
16967           */
16968          public Builder setStateToAccept(
16969              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder builderForValue) {
16970            if (stateToAcceptBuilder_ == null) {
16971              stateToAccept_ = builderForValue.build();
16972              onChanged();
16973            } else {
16974              stateToAcceptBuilder_.setMessage(builderForValue.build());
16975            }
16976            bitField0_ |= 0x00000002;
16977            return this;
16978          }
16979          /**
16980           * <code>required .hadoop.hdfs.SegmentStateProto stateToAccept = 2;</code>
16981           *
16982           * <pre>
16983           ** Details on the segment to recover 
16984           * </pre>
16985           */
16986          public Builder mergeStateToAccept(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto value) {
16987            if (stateToAcceptBuilder_ == null) {
16988              if (((bitField0_ & 0x00000002) == 0x00000002) &&
16989                  stateToAccept_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance()) {
16990                stateToAccept_ =
16991                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.newBuilder(stateToAccept_).mergeFrom(value).buildPartial();
16992              } else {
16993                stateToAccept_ = value;
16994              }
16995              onChanged();
16996            } else {
16997              stateToAcceptBuilder_.mergeFrom(value);
16998            }
16999            bitField0_ |= 0x00000002;
17000            return this;
17001          }
17002          /**
17003           * <code>required .hadoop.hdfs.SegmentStateProto stateToAccept = 2;</code>
17004           *
17005           * <pre>
17006           ** Details on the segment to recover 
17007           * </pre>
17008           */
17009          public Builder clearStateToAccept() {
17010            if (stateToAcceptBuilder_ == null) {
17011              stateToAccept_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance();
17012              onChanged();
17013            } else {
17014              stateToAcceptBuilder_.clear();
17015            }
17016            bitField0_ = (bitField0_ & ~0x00000002);
17017            return this;
17018          }
17019          /**
17020           * <code>required .hadoop.hdfs.SegmentStateProto stateToAccept = 2;</code>
17021           *
17022           * <pre>
17023           ** Details on the segment to recover 
17024           * </pre>
17025           */
17026          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder getStateToAcceptBuilder() {
17027            bitField0_ |= 0x00000002;
17028            onChanged();
17029            return getStateToAcceptFieldBuilder().getBuilder();
17030          }
17031          /**
17032           * <code>required .hadoop.hdfs.SegmentStateProto stateToAccept = 2;</code>
17033           *
17034           * <pre>
17035           ** Details on the segment to recover 
17036           * </pre>
17037           */
17038          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder getStateToAcceptOrBuilder() {
17039            if (stateToAcceptBuilder_ != null) {
17040              return stateToAcceptBuilder_.getMessageOrBuilder();
17041            } else {
17042              return stateToAccept_;
17043            }
17044          }
17045          /**
17046           * <code>required .hadoop.hdfs.SegmentStateProto stateToAccept = 2;</code>
17047           *
17048           * <pre>
17049           ** Details on the segment to recover 
17050           * </pre>
17051           */
17052          private com.google.protobuf.SingleFieldBuilder<
17053              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder> 
17054              getStateToAcceptFieldBuilder() {
17055            if (stateToAcceptBuilder_ == null) {
17056              stateToAcceptBuilder_ = new com.google.protobuf.SingleFieldBuilder<
17057                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder>(
17058                      stateToAccept_,
17059                      getParentForChildren(),
17060                      isClean());
17061              stateToAccept_ = null;
17062            }
17063            return stateToAcceptBuilder_;
17064          }
17065    
17066          // required string fromURL = 3;
17067          private java.lang.Object fromURL_ = "";
17068          /**
17069           * <code>required string fromURL = 3;</code>
17070           *
17071           * <pre>
17072           ** The URL from which the log may be copied 
17073           * </pre>
17074           */
17075          public boolean hasFromURL() {
17076            return ((bitField0_ & 0x00000004) == 0x00000004);
17077          }
17078          /**
17079           * <code>required string fromURL = 3;</code>
17080           *
17081           * <pre>
17082           ** The URL from which the log may be copied 
17083           * </pre>
17084           */
17085          public java.lang.String getFromURL() {
17086            java.lang.Object ref = fromURL_;
17087            if (!(ref instanceof java.lang.String)) {
17088              java.lang.String s = ((com.google.protobuf.ByteString) ref)
17089                  .toStringUtf8();
17090              fromURL_ = s;
17091              return s;
17092            } else {
17093              return (java.lang.String) ref;
17094            }
17095          }
17096          /**
17097           * <code>required string fromURL = 3;</code>
17098           *
17099           * <pre>
17100           ** The URL from which the log may be copied 
17101           * </pre>
17102           */
17103          public com.google.protobuf.ByteString
17104              getFromURLBytes() {
17105            java.lang.Object ref = fromURL_;
17106            if (ref instanceof String) {
17107              com.google.protobuf.ByteString b = 
17108                  com.google.protobuf.ByteString.copyFromUtf8(
17109                      (java.lang.String) ref);
17110              fromURL_ = b;
17111              return b;
17112            } else {
17113              return (com.google.protobuf.ByteString) ref;
17114            }
17115          }
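               // fromURL_ holds either a java.lang.String or a ByteString:
               // getFromURL() lazily decodes a ByteString to UTF-8 and caches the
               // String, while getFromURLBytes() encodes and caches the reverse, so
               // repeated access in either representation avoids re-conversion.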
17116          /**
17117           * <code>required string fromURL = 3;</code>
17118           *
17119           * <pre>
17120           ** The URL from which the log may be copied 
17121           * </pre>
17122           */
17123          public Builder setFromURL(
17124              java.lang.String value) {
17125            if (value == null) {
17126              throw new NullPointerException();
17127            }
17128            bitField0_ |= 0x00000004;
17129            fromURL_ = value;
17130            onChanged();
17131            return this;
17132          }
17133          /**
17134           * <code>required string fromURL = 3;</code>
17135           *
17136           * <pre>
17137           ** The URL from which the log may be copied 
17138           * </pre>
17139           */
17140          public Builder clearFromURL() {
17141            bitField0_ = (bitField0_ & ~0x00000004);
17142            fromURL_ = getDefaultInstance().getFromURL();
17143            onChanged();
17144            return this;
17145          }
17146          /**
17147           * <code>required string fromURL = 3;</code>
17148           *
17149           * <pre>
17150           ** The URL from which the log may be copied 
17151           * </pre>
17152           */
17153          public Builder setFromURLBytes(
17154              com.google.protobuf.ByteString value) {
17155            if (value == null) {
17156              throw new NullPointerException();
17157            }
17158            bitField0_ |= 0x00000004;
17159            fromURL_ = value;
17160            onChanged();
17161            return this;
17162          }
17163    
17164          // @@protoc_insertion_point(builder_scope:hadoop.hdfs.AcceptRecoveryRequestProto)
17165        }
17166    
17167        static {
17168          defaultInstance = new AcceptRecoveryRequestProto(true);
17169          defaultInstance.initFields();
17170        }
17171    
17172        // @@protoc_insertion_point(class_scope:hadoop.hdfs.AcceptRecoveryRequestProto)
17173      }
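           // Construction sketch (illustrative values; `reqInfo` and `segmentState`
           // stand for previously built RequestInfoProto/SegmentStateProto
           // messages, and the URL is hypothetical):
           //
           //   AcceptRecoveryRequestProto request = AcceptRecoveryRequestProto.newBuilder()
           //       .setReqInfo(reqInfo)
           //       .setStateToAccept(segmentState)
           //       .setFromURL("http://jn0:8480/getJournal")  // illustrative URL
           //       .build();  // throws if a required field is unset
           //   AcceptRecoveryRequestProto roundTripped =
           //       AcceptRecoveryRequestProto.parseFrom(request.toByteArray());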
17174    
17175      public interface AcceptRecoveryResponseProtoOrBuilder
17176          extends com.google.protobuf.MessageOrBuilder {
17177      }
17178      /**
17179       * Protobuf type {@code hadoop.hdfs.AcceptRecoveryResponseProto}
17180       */
17181      public static final class AcceptRecoveryResponseProto extends
17182          com.google.protobuf.GeneratedMessage
17183          implements AcceptRecoveryResponseProtoOrBuilder {
17184        // Use AcceptRecoveryResponseProto.newBuilder() to construct.
17185        private AcceptRecoveryResponseProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
17186          super(builder);
17187          this.unknownFields = builder.getUnknownFields();
17188        }
17189        private AcceptRecoveryResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
17190    
17191        private static final AcceptRecoveryResponseProto defaultInstance;
17192        public static AcceptRecoveryResponseProto getDefaultInstance() {
17193          return defaultInstance;
17194        }
17195    
17196        public AcceptRecoveryResponseProto getDefaultInstanceForType() {
17197          return defaultInstance;
17198        }
17199    
17200        private final com.google.protobuf.UnknownFieldSet unknownFields;
17201        @java.lang.Override
17202        public final com.google.protobuf.UnknownFieldSet
17203            getUnknownFields() {
17204          return this.unknownFields;
17205        }
17206        private AcceptRecoveryResponseProto(
17207            com.google.protobuf.CodedInputStream input,
17208            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
17209            throws com.google.protobuf.InvalidProtocolBufferException {
17210          initFields();
17211          com.google.protobuf.UnknownFieldSet.Builder unknownFields =
17212              com.google.protobuf.UnknownFieldSet.newBuilder();
17213          try {
17214            boolean done = false;
17215            while (!done) {
17216              int tag = input.readTag();
17217              switch (tag) {
17218                case 0:
17219                  done = true;
17220                  break;
17221                default: {
17222                  if (!parseUnknownField(input, unknownFields,
17223                                         extensionRegistry, tag)) {
17224                    done = true;
17225                  }
17226                  break;
17227                }
17228              }
17229            }
17230          } catch (com.google.protobuf.InvalidProtocolBufferException e) {
17231            throw e.setUnfinishedMessage(this);
17232          } catch (java.io.IOException e) {
17233            throw new com.google.protobuf.InvalidProtocolBufferException(
17234                e.getMessage()).setUnfinishedMessage(this);
17235          } finally {
17236            this.unknownFields = unknownFields.build();
17237            makeExtensionsImmutable();
17238          }
17239        }
17240        public static final com.google.protobuf.Descriptors.Descriptor
17241            getDescriptor() {
17242          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_AcceptRecoveryResponseProto_descriptor;
17243        }
17244    
17245        protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
17246            internalGetFieldAccessorTable() {
17247          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_AcceptRecoveryResponseProto_fieldAccessorTable
17248              .ensureFieldAccessorsInitialized(
17249                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto.Builder.class);
17250        }
17251    
17252        public static com.google.protobuf.Parser<AcceptRecoveryResponseProto> PARSER =
17253            new com.google.protobuf.AbstractParser<AcceptRecoveryResponseProto>() {
17254          public AcceptRecoveryResponseProto parsePartialFrom(
17255              com.google.protobuf.CodedInputStream input,
17256              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
17257              throws com.google.protobuf.InvalidProtocolBufferException {
17258            return new AcceptRecoveryResponseProto(input, extensionRegistry);
17259          }
17260        };
17261    
17262        @java.lang.Override
17263        public com.google.protobuf.Parser<AcceptRecoveryResponseProto> getParserForType() {
17264          return PARSER;
17265        }
17266    
17267        private void initFields() {
17268        }
17269        private byte memoizedIsInitialized = -1;
17270        public final boolean isInitialized() {
17271          byte isInitialized = memoizedIsInitialized;
17272          if (isInitialized != -1) return isInitialized == 1;
17273    
17274          memoizedIsInitialized = 1;
17275          return true;
17276        }
17277    
17278        public void writeTo(com.google.protobuf.CodedOutputStream output)
17279                            throws java.io.IOException {
17280          getSerializedSize();
17281          getUnknownFields().writeTo(output);
17282        }
17283    
17284        private int memoizedSerializedSize = -1;
17285        public int getSerializedSize() {
17286          int size = memoizedSerializedSize;
17287          if (size != -1) return size;
17288    
17289          size = 0;
17290          size += getUnknownFields().getSerializedSize();
17291          memoizedSerializedSize = size;
17292          return size;
17293        }
17294    
17295        private static final long serialVersionUID = 0L;
17296        @java.lang.Override
17297        protected java.lang.Object writeReplace()
17298            throws java.io.ObjectStreamException {
17299          return super.writeReplace();
17300        }
17301    
17302        @java.lang.Override
17303        public boolean equals(final java.lang.Object obj) {
17304          if (obj == this) {
17305            return true;
17306          }
17307          if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto)) {
17308            return super.equals(obj);
17309          }
17310          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto) obj;
17311    
17312          boolean result = true;
17313          result = result &&
17314              getUnknownFields().equals(other.getUnknownFields());
17315          return result;
17316        }
17317    
17318        private int memoizedHashCode = 0;
17319        @java.lang.Override
17320        public int hashCode() {
17321          if (memoizedHashCode != 0) {
17322            return memoizedHashCode;
17323          }
17324          int hash = 41;
17325          hash = (19 * hash) + getDescriptorForType().hashCode();
17326          hash = (29 * hash) + getUnknownFields().hashCode();
17327          memoizedHashCode = hash;
17328          return hash;
17329        }
17330    
17331        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto parseFrom(
17332            com.google.protobuf.ByteString data)
17333            throws com.google.protobuf.InvalidProtocolBufferException {
17334          return PARSER.parseFrom(data);
17335        }
17336        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto parseFrom(
17337            com.google.protobuf.ByteString data,
17338            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
17339            throws com.google.protobuf.InvalidProtocolBufferException {
17340          return PARSER.parseFrom(data, extensionRegistry);
17341        }
17342        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto parseFrom(byte[] data)
17343            throws com.google.protobuf.InvalidProtocolBufferException {
17344          return PARSER.parseFrom(data);
17345        }
17346        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto parseFrom(
17347            byte[] data,
17348            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
17349            throws com.google.protobuf.InvalidProtocolBufferException {
17350          return PARSER.parseFrom(data, extensionRegistry);
17351        }
17352        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto parseFrom(java.io.InputStream input)
17353            throws java.io.IOException {
17354          return PARSER.parseFrom(input);
17355        }
17356        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto parseFrom(
17357            java.io.InputStream input,
17358            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
17359            throws java.io.IOException {
17360          return PARSER.parseFrom(input, extensionRegistry);
17361        }
17362        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto parseDelimitedFrom(java.io.InputStream input)
17363            throws java.io.IOException {
17364          return PARSER.parseDelimitedFrom(input);
17365        }
17366        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto parseDelimitedFrom(
17367            java.io.InputStream input,
17368            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
17369            throws java.io.IOException {
17370          return PARSER.parseDelimitedFrom(input, extensionRegistry);
17371        }
17372        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto parseFrom(
17373            com.google.protobuf.CodedInputStream input)
17374            throws java.io.IOException {
17375          return PARSER.parseFrom(input);
17376        }
17377        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto parseFrom(
17378            com.google.protobuf.CodedInputStream input,
17379            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
17380            throws java.io.IOException {
17381          return PARSER.parseFrom(input, extensionRegistry);
17382        }
17383    
17384        public static Builder newBuilder() { return Builder.create(); }
17385        public Builder newBuilderForType() { return newBuilder(); }
17386        public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto prototype) {
17387          return newBuilder().mergeFrom(prototype);
17388        }
17389        public Builder toBuilder() { return newBuilder(this); }
17390    
17391        @java.lang.Override
17392        protected Builder newBuilderForType(
17393            com.google.protobuf.GeneratedMessage.BuilderParent parent) {
17394          Builder builder = new Builder(parent);
17395          return builder;
17396        }
17397        /**
17398         * Protobuf type {@code hadoop.hdfs.AcceptRecoveryResponseProto}
17399         */
17400        public static final class Builder extends
17401            com.google.protobuf.GeneratedMessage.Builder<Builder>
17402           implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProtoOrBuilder {
17403          public static final com.google.protobuf.Descriptors.Descriptor
17404              getDescriptor() {
17405            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_AcceptRecoveryResponseProto_descriptor;
17406          }
17407    
17408          protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
17409              internalGetFieldAccessorTable() {
17410            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_AcceptRecoveryResponseProto_fieldAccessorTable
17411                .ensureFieldAccessorsInitialized(
17412                    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto.Builder.class);
17413          }
17414    
17415          // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto.newBuilder()
17416          private Builder() {
17417            maybeForceBuilderInitialization();
17418          }
17419    
17420          private Builder(
17421              com.google.protobuf.GeneratedMessage.BuilderParent parent) {
17422            super(parent);
17423            maybeForceBuilderInitialization();
17424          }
17425          private void maybeForceBuilderInitialization() {
17426            if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
17427            }
17428          }
17429          private static Builder create() {
17430            return new Builder();
17431          }
17432    
17433          public Builder clear() {
17434            super.clear();
17435            return this;
17436          }
17437    
17438          public Builder clone() {
17439            return create().mergeFrom(buildPartial());
17440          }
17441    
17442          public com.google.protobuf.Descriptors.Descriptor
17443              getDescriptorForType() {
17444            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_AcceptRecoveryResponseProto_descriptor;
17445          }
17446    
17447          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto getDefaultInstanceForType() {
17448            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto.getDefaultInstance();
17449          }
17450    
17451          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto build() {
17452            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto result = buildPartial();
17453            if (!result.isInitialized()) {
17454              throw newUninitializedMessageException(result);
17455            }
17456            return result;
17457          }
17458    
17459          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto buildPartial() {
17460            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto(this);
17461            onBuilt();
17462            return result;
17463          }
17464    
17465          public Builder mergeFrom(com.google.protobuf.Message other) {
17466            if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto) {
17467              return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto)other);
17468            } else {
17469              super.mergeFrom(other);
17470              return this;
17471            }
17472          }
17473    
17474          public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto other) {
17475            if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto.getDefaultInstance()) return this;
17476            this.mergeUnknownFields(other.getUnknownFields());
17477            return this;
17478          }
17479    
17480          public final boolean isInitialized() {
17481            return true;
17482          }
17483    
17484          public Builder mergeFrom(
17485              com.google.protobuf.CodedInputStream input,
17486              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
17487              throws java.io.IOException {
17488            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto parsedMessage = null;
17489            try {
17490              parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
17491            } catch (com.google.protobuf.InvalidProtocolBufferException e) {
17492              parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto) e.getUnfinishedMessage();
17493              throw e;
17494            } finally {
17495              if (parsedMessage != null) {
17496                mergeFrom(parsedMessage);
17497              }
17498            }
17499            return this;
17500          }
17501    
17502          // @@protoc_insertion_point(builder_scope:hadoop.hdfs.AcceptRecoveryResponseProto)
17503        }
17504    
17505        static {
17506          defaultInstance = new AcceptRecoveryResponseProto(true);
17507          defaultInstance.initFields();
17508        }
17509    
17510        // @@protoc_insertion_point(class_scope:hadoop.hdfs.AcceptRecoveryResponseProto)
17511      }
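           // AcceptRecoveryResponseProto declares no fields; it is a pure
           // acknowledgement, so a default instance serializes to zero bytes:
           //
           //   byte[] wire = AcceptRecoveryResponseProto.getDefaultInstance().toByteArray();
           //   // wire.length == 0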
17512    
17513      /**
17514       * Protobuf service {@code hadoop.hdfs.QJournalProtocolService}
17515       *
17516       * <pre>
17517       **
17518       * Protocol used to journal edits to a JournalNode.
17519       * See the request and response for details of rpc call.
17520       * </pre>
17521       */
17522      public static abstract class QJournalProtocolService
17523          implements com.google.protobuf.Service {
17524        protected QJournalProtocolService() {}
17525    
17526        public interface Interface {
17527          /**
17528           * <code>rpc isFormatted(.hadoop.hdfs.IsFormattedRequestProto) returns (.hadoop.hdfs.IsFormattedResponseProto);</code>
17529           */
17530          public abstract void isFormatted(
17531              com.google.protobuf.RpcController controller,
17532              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto request,
17533              com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto> done);
17534    
17535          /**
17536           * <code>rpc getJournalState(.hadoop.hdfs.GetJournalStateRequestProto) returns (.hadoop.hdfs.GetJournalStateResponseProto);</code>
17537           */
17538          public abstract void getJournalState(
17539              com.google.protobuf.RpcController controller,
17540              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto request,
17541              com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto> done);
17542    
17543          /**
17544           * <code>rpc newEpoch(.hadoop.hdfs.NewEpochRequestProto) returns (.hadoop.hdfs.NewEpochResponseProto);</code>
17545           */
17546          public abstract void newEpoch(
17547              com.google.protobuf.RpcController controller,
17548              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto request,
17549              com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto> done);
17550    
17551          /**
17552           * <code>rpc format(.hadoop.hdfs.FormatRequestProto) returns (.hadoop.hdfs.FormatResponseProto);</code>
17553           */
17554          public abstract void format(
17555              com.google.protobuf.RpcController controller,
17556              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto request,
17557              com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto> done);
17558    
17559          /**
17560           * <code>rpc journal(.hadoop.hdfs.JournalRequestProto) returns (.hadoop.hdfs.JournalResponseProto);</code>
17561           */
17562          public abstract void journal(
17563              com.google.protobuf.RpcController controller,
17564              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto request,
17565              com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto> done);
17566    
17567          /**
17568           * <code>rpc heartbeat(.hadoop.hdfs.HeartbeatRequestProto) returns (.hadoop.hdfs.HeartbeatResponseProto);</code>
17569           */
17570          public abstract void heartbeat(
17571              com.google.protobuf.RpcController controller,
17572              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto request,
17573              com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto> done);
17574    
17575          /**
17576           * <code>rpc startLogSegment(.hadoop.hdfs.StartLogSegmentRequestProto) returns (.hadoop.hdfs.StartLogSegmentResponseProto);</code>
17577           */
17578          public abstract void startLogSegment(
17579              com.google.protobuf.RpcController controller,
17580              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto request,
17581              com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto> done);
17582    
17583          /**
17584           * <code>rpc finalizeLogSegment(.hadoop.hdfs.FinalizeLogSegmentRequestProto) returns (.hadoop.hdfs.FinalizeLogSegmentResponseProto);</code>
17585           */
17586          public abstract void finalizeLogSegment(
17587              com.google.protobuf.RpcController controller,
17588              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto request,
17589              com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto> done);
17590    
17591          /**
17592           * <code>rpc purgeLogs(.hadoop.hdfs.PurgeLogsRequestProto) returns (.hadoop.hdfs.PurgeLogsResponseProto);</code>
17593           */
17594          public abstract void purgeLogs(
17595              com.google.protobuf.RpcController controller,
17596              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto request,
17597              com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto> done);
17598    
17599          /**
17600           * <code>rpc getEditLogManifest(.hadoop.hdfs.GetEditLogManifestRequestProto) returns (.hadoop.hdfs.GetEditLogManifestResponseProto);</code>
17601           */
17602          public abstract void getEditLogManifest(
17603              com.google.protobuf.RpcController controller,
17604              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto request,
17605              com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto> done);
17606    
17607          /**
17608           * <code>rpc prepareRecovery(.hadoop.hdfs.PrepareRecoveryRequestProto) returns (.hadoop.hdfs.PrepareRecoveryResponseProto);</code>
17609           */
17610          public abstract void prepareRecovery(
17611              com.google.protobuf.RpcController controller,
17612              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto request,
17613              com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto> done);
17614    
17615          /**
17616           * <code>rpc acceptRecovery(.hadoop.hdfs.AcceptRecoveryRequestProto) returns (.hadoop.hdfs.AcceptRecoveryResponseProto);</code>
17617           */
17618          public abstract void acceptRecovery(
17619              com.google.protobuf.RpcController controller,
17620              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto request,
17621              com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto> done);
17622    
17623        }
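             // Implementation sketch (hypothetical class name; only acceptRecovery
             // is shown, the other eleven rpcs follow the same callback shape):
             //
             //   class MyJournalImpl implements QJournalProtocolService.Interface {
             //     public void acceptRecovery(
             //         com.google.protobuf.RpcController controller,
             //         AcceptRecoveryRequestProto request,
             //         com.google.protobuf.RpcCallback<AcceptRecoveryResponseProto> done) {
             //       // ...apply the accepted recovery decision, then acknowledge:
             //       done.run(AcceptRecoveryResponseProto.getDefaultInstance());
             //     }
             //     // isFormatted, getJournalState, newEpoch, format, journal,
             //     // heartbeat, startLogSegment, finalizeLogSegment, purgeLogs,
             //     // getEditLogManifest, prepareRecovery omitted for brevity.
             //   }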
17624    
17625        public static com.google.protobuf.Service newReflectiveService(
17626            final Interface impl) {
17627          return new QJournalProtocolService() {
17628            @java.lang.Override
17629            public void isFormatted(
17630                com.google.protobuf.RpcController controller,
17631                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto request,
17632                com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto> done) {
17633              impl.isFormatted(controller, request, done);
17634            }
17635    
17636            @java.lang.Override
17637            public void getJournalState(
17638                com.google.protobuf.RpcController controller,
17639                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto request,
17640                com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto> done) {
17641              impl.getJournalState(controller, request, done);
17642            }
17643    
17644            @java.lang.Override
17645            public void newEpoch(
17646                com.google.protobuf.RpcController controller,
17647                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto request,
17648                com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto> done) {
17649              impl.newEpoch(controller, request, done);
17650            }
17651    
17652            @java.lang.Override
17653            public void format(
17654                com.google.protobuf.RpcController controller,
17655                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto request,
17656                com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto> done) {
17657              impl.format(controller, request, done);
17658            }
17659    
17660            @java.lang.Override
17661            public void journal(
17662                com.google.protobuf.RpcController controller,
17663                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto request,
17664                com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto> done) {
17665              impl.journal(controller, request, done);
17666            }
17667    
17668            @java.lang.Override
17669            public void heartbeat(
17670                com.google.protobuf.RpcController controller,
17671                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto request,
17672                com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto> done) {
17673              impl.heartbeat(controller, request, done);
17674            }
17675    
17676            @java.lang.Override
17677            public void startLogSegment(
17678                com.google.protobuf.RpcController controller,
17679                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto request,
17680                com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto> done) {
17681              impl.startLogSegment(controller, request, done);
17682            }
17683    
17684            @java.lang.Override
17685            public void finalizeLogSegment(
17686                com.google.protobuf.RpcController controller,
17687                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto request,
17688                com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto> done) {
17689              impl.finalizeLogSegment(controller, request, done);
17690            }
17691    
17692            @java.lang.Override
17693            public void purgeLogs(
17694                com.google.protobuf.RpcController controller,
17695                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto request,
17696                com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto> done) {
17697              impl.purgeLogs(controller, request, done);
17698            }
17699    
17700            @java.lang.Override
17701            public void getEditLogManifest(
17702                com.google.protobuf.RpcController controller,
17703                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto request,
17704                com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto> done) {
17705              impl.getEditLogManifest(controller, request, done);
17706            }
17707    
17708            @java.lang.Override
17709            public void prepareRecovery(
17710                com.google.protobuf.RpcController controller,
17711                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto request,
17712                com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto> done) {
17713              impl.prepareRecovery(controller, request, done);
17714            }
17715    
17716            @java.lang.Override
17717            public void acceptRecovery(
17718                com.google.protobuf.RpcController controller,
17719                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto request,
17720                com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto> done) {
17721              impl.acceptRecovery(controller, request, done);
17722            }
17723    
17724          };
17725        }
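             // newReflectiveService adapts an Interface implementation to the
             // generic com.google.protobuf.Service contract, e.g. (MyJournalImpl
             // as sketched above):
             //
             //   com.google.protobuf.Service service =
             //       QJournalProtocolService.newReflectiveService(new MyJournalImpl());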
17726    
17727        public static com.google.protobuf.BlockingService
17728            newReflectiveBlockingService(final BlockingInterface impl) {
17729          return new com.google.protobuf.BlockingService() {
17730            public final com.google.protobuf.Descriptors.ServiceDescriptor
17731                getDescriptorForType() {
17732              return getDescriptor();
17733            }
17734    
17735            public final com.google.protobuf.Message callBlockingMethod(
17736                com.google.protobuf.Descriptors.MethodDescriptor method,
17737                com.google.protobuf.RpcController controller,
17738                com.google.protobuf.Message request)
17739                throws com.google.protobuf.ServiceException {
17740              if (method.getService() != getDescriptor()) {
17741                throw new java.lang.IllegalArgumentException(
17742                  "Service.callBlockingMethod() given method descriptor for " +
17743                  "wrong service type.");
17744              }
17745              switch(method.getIndex()) {
17746                case 0:
17747                  return impl.isFormatted(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto)request);
17748                case 1:
17749                  return impl.getJournalState(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto)request);
17750                case 2:
17751                  return impl.newEpoch(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto)request);
17752                case 3:
17753                  return impl.format(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto)request);
17754                case 4:
17755                  return impl.journal(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto)request);
17756                case 5:
17757                  return impl.heartbeat(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto)request);
17758                case 6:
17759                  return impl.startLogSegment(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto)request);
17760                case 7:
17761                  return impl.finalizeLogSegment(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto)request);
17762                case 8:
17763                  return impl.purgeLogs(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto)request);
17764                case 9:
17765                  return impl.getEditLogManifest(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto)request);
17766                case 10:
17767                  return impl.prepareRecovery(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto)request);
17768                case 11:
17769                  return impl.acceptRecovery(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto)request);
17770                default:
17771                  throw new java.lang.AssertionError("Can't get here.");
17772              }
17773            }
17774    
17775            public final com.google.protobuf.Message
17776                getRequestPrototype(
17777                com.google.protobuf.Descriptors.MethodDescriptor method) {
17778              if (method.getService() != getDescriptor()) {
17779                throw new java.lang.IllegalArgumentException(
17780                  "Service.getRequestPrototype() given method " +
17781                  "descriptor for wrong service type.");
17782              }
17783              switch(method.getIndex()) {
17784                case 0:
17785                  return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto.getDefaultInstance();
17786                case 1:
17787                  return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto.getDefaultInstance();
17788                case 2:
17789                  return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto.getDefaultInstance();
17790                case 3:
17791                  return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto.getDefaultInstance();
17792                case 4:
17793                  return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto.getDefaultInstance();
17794                case 5:
17795                  return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto.getDefaultInstance();
17796                case 6:
17797                  return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto.getDefaultInstance();
17798                case 7:
17799                  return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto.getDefaultInstance();
17800                case 8:
17801                  return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto.getDefaultInstance();
17802                case 9:
17803                  return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto.getDefaultInstance();
17804                case 10:
17805                  return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto.getDefaultInstance();
17806                case 11:
17807                  return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto.getDefaultInstance();
17808                default:
17809                  throw new java.lang.AssertionError("Can't get here.");
17810              }
17811            }
17812    
17813            public final com.google.protobuf.Message
17814                getResponsePrototype(
17815                com.google.protobuf.Descriptors.MethodDescriptor method) {
17816              if (method.getService() != getDescriptor()) {
17817                throw new java.lang.IllegalArgumentException(
17818                  "Service.getResponsePrototype() given method " +
17819                  "descriptor for wrong service type.");
17820              }
17821              switch(method.getIndex()) {
17822                case 0:
17823                  return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto.getDefaultInstance();
17824                case 1:
17825                  return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto.getDefaultInstance();
17826                case 2:
17827                  return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto.getDefaultInstance();
17828                case 3:
17829                  return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto.getDefaultInstance();
17830                case 4:
17831                  return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto.getDefaultInstance();
17832                case 5:
17833                  return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto.getDefaultInstance();
17834                case 6:
17835                  return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto.getDefaultInstance();
17836                case 7:
17837                  return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto.getDefaultInstance();
17838                case 8:
17839                  return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto.getDefaultInstance();
17840                case 9:
17841                  return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto.getDefaultInstance();
17842                case 10:
17843                  return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto.getDefaultInstance();
17844                case 11:
17845                  return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto.getDefaultInstance();
17846                default:
17847                  throw new java.lang.AssertionError("Can't get here.");
17848              }
17849            }
17850    
17851          };
17852        }
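             // Added commentary (not generated output): the prototype lookups
             // above dispatch on MethodDescriptor.getIndex(), which follows the
             // declaration order of the twelve rpcs in QJournalProtocol.proto
             // (isFormatted = 0 through acceptRecovery = 11). A hedged sketch of
             // resolving the first rpc by hand with the standard descriptor API:
             //
             //   com.google.protobuf.Descriptors.MethodDescriptor m =
             //       QJournalProtocolService.getDescriptor().getMethods().get(0);
             //   assert m.getIndex() == 0 && "isFormatted".equals(m.getName());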
17853    
17854        /**
17855         * <code>rpc isFormatted(.hadoop.hdfs.IsFormattedRequestProto) returns (.hadoop.hdfs.IsFormattedResponseProto);</code>
17856         */
17857        public abstract void isFormatted(
17858            com.google.protobuf.RpcController controller,
17859            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto request,
17860            com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto> done);
17861    
17862        /**
17863         * <code>rpc getJournalState(.hadoop.hdfs.GetJournalStateRequestProto) returns (.hadoop.hdfs.GetJournalStateResponseProto);</code>
17864         */
17865        public abstract void getJournalState(
17866            com.google.protobuf.RpcController controller,
17867            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto request,
17868            com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto> done);
17869    
17870        /**
17871         * <code>rpc newEpoch(.hadoop.hdfs.NewEpochRequestProto) returns (.hadoop.hdfs.NewEpochResponseProto);</code>
17872         */
17873        public abstract void newEpoch(
17874            com.google.protobuf.RpcController controller,
17875            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto request,
17876            com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto> done);
17877    
17878        /**
17879         * <code>rpc format(.hadoop.hdfs.FormatRequestProto) returns (.hadoop.hdfs.FormatResponseProto);</code>
17880         */
17881        public abstract void format(
17882            com.google.protobuf.RpcController controller,
17883            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto request,
17884            com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto> done);
17885    
17886        /**
17887         * <code>rpc journal(.hadoop.hdfs.JournalRequestProto) returns (.hadoop.hdfs.JournalResponseProto);</code>
17888         */
17889        public abstract void journal(
17890            com.google.protobuf.RpcController controller,
17891            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto request,
17892            com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto> done);
17893    
17894        /**
17895         * <code>rpc heartbeat(.hadoop.hdfs.HeartbeatRequestProto) returns (.hadoop.hdfs.HeartbeatResponseProto);</code>
17896         */
17897        public abstract void heartbeat(
17898            com.google.protobuf.RpcController controller,
17899            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto request,
17900            com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto> done);
17901    
17902        /**
17903         * <code>rpc startLogSegment(.hadoop.hdfs.StartLogSegmentRequestProto) returns (.hadoop.hdfs.StartLogSegmentResponseProto);</code>
17904         */
17905        public abstract void startLogSegment(
17906            com.google.protobuf.RpcController controller,
17907            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto request,
17908            com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto> done);
17909    
17910        /**
17911         * <code>rpc finalizeLogSegment(.hadoop.hdfs.FinalizeLogSegmentRequestProto) returns (.hadoop.hdfs.FinalizeLogSegmentResponseProto);</code>
17912         */
17913        public abstract void finalizeLogSegment(
17914            com.google.protobuf.RpcController controller,
17915            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto request,
17916            com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto> done);
17917    
17918        /**
17919         * <code>rpc purgeLogs(.hadoop.hdfs.PurgeLogsRequestProto) returns (.hadoop.hdfs.PurgeLogsResponseProto);</code>
17920         */
17921        public abstract void purgeLogs(
17922            com.google.protobuf.RpcController controller,
17923            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto request,
17924            com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto> done);
17925    
17926        /**
17927         * <code>rpc getEditLogManifest(.hadoop.hdfs.GetEditLogManifestRequestProto) returns (.hadoop.hdfs.GetEditLogManifestResponseProto);</code>
17928         */
17929        public abstract void getEditLogManifest(
17930            com.google.protobuf.RpcController controller,
17931            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto request,
17932            com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto> done);
17933    
17934        /**
17935         * <code>rpc prepareRecovery(.hadoop.hdfs.PrepareRecoveryRequestProto) returns (.hadoop.hdfs.PrepareRecoveryResponseProto);</code>
17936         */
17937        public abstract void prepareRecovery(
17938            com.google.protobuf.RpcController controller,
17939            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto request,
17940            com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto> done);
17941    
17942        /**
17943         * <code>rpc acceptRecovery(.hadoop.hdfs.AcceptRecoveryRequestProto) returns (.hadoop.hdfs.AcceptRecoveryResponseProto);</code>
17944         */
17945        public abstract void acceptRecovery(
17946            com.google.protobuf.RpcController controller,
17947            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto request,
17948            com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto> done);
17949    
17950        public static final
17951            com.google.protobuf.Descriptors.ServiceDescriptor
17952            getDescriptor() {
17953          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.getDescriptor().getServices().get(0);
17954        }
17955        public final com.google.protobuf.Descriptors.ServiceDescriptor
17956            getDescriptorForType() {
17957          return getDescriptor();
17958        }
17959    
17960        public final void callMethod(
17961            com.google.protobuf.Descriptors.MethodDescriptor method,
17962            com.google.protobuf.RpcController controller,
17963            com.google.protobuf.Message request,
17964            com.google.protobuf.RpcCallback<
17965              com.google.protobuf.Message> done) {
17966          if (method.getService() != getDescriptor()) {
17967            throw new java.lang.IllegalArgumentException(
17968              "Service.callMethod() given method descriptor for wrong " +
17969              "service type.");
17970          }
17971          switch(method.getIndex()) {
17972            case 0:
17973              this.isFormatted(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto)request,
17974                com.google.protobuf.RpcUtil.<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto>specializeCallback(
17975                  done));
17976              return;
17977            case 1:
17978              this.getJournalState(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto)request,
17979                com.google.protobuf.RpcUtil.<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto>specializeCallback(
17980                  done));
17981              return;
17982            case 2:
17983              this.newEpoch(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto)request,
17984                com.google.protobuf.RpcUtil.<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto>specializeCallback(
17985                  done));
17986              return;
17987            case 3:
17988              this.format(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto)request,
17989                com.google.protobuf.RpcUtil.<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto>specializeCallback(
17990                  done));
17991              return;
17992            case 4:
17993              this.journal(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto)request,
17994                com.google.protobuf.RpcUtil.<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto>specializeCallback(
17995                  done));
17996              return;
17997            case 5:
17998              this.heartbeat(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto)request,
17999                com.google.protobuf.RpcUtil.<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto>specializeCallback(
18000                  done));
18001              return;
18002            case 6:
18003              this.startLogSegment(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto)request,
18004                com.google.protobuf.RpcUtil.<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto>specializeCallback(
18005                  done));
18006              return;
18007            case 7:
18008              this.finalizeLogSegment(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto)request,
18009                com.google.protobuf.RpcUtil.<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto>specializeCallback(
18010                  done));
18011              return;
18012            case 8:
18013              this.purgeLogs(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto)request,
18014                com.google.protobuf.RpcUtil.<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto>specializeCallback(
18015                  done));
18016              return;
18017            case 9:
18018              this.getEditLogManifest(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto)request,
18019                com.google.protobuf.RpcUtil.<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto>specializeCallback(
18020                  done));
18021              return;
18022            case 10:
18023              this.prepareRecovery(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto)request,
18024                com.google.protobuf.RpcUtil.<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto>specializeCallback(
18025                  done));
18026              return;
18027            case 11:
18028              this.acceptRecovery(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto)request,
18029                com.google.protobuf.RpcUtil.<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto>specializeCallback(
18030                  done));
18031              return;
18032            default:
18033              throw new java.lang.AssertionError("Can't get here.");
18034          }
18035        }
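             // Added commentary (not generated output): RpcUtil.specializeCallback
             // narrows the RpcCallback<Message> handed in by the RPC layer to the
             // concrete response type each abstract method expects; the Stub class
             // below performs the inverse conversion with RpcUtil.generalizeCallback.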
18036    
18037        public final com.google.protobuf.Message
18038            getRequestPrototype(
18039            com.google.protobuf.Descriptors.MethodDescriptor method) {
18040          if (method.getService() != getDescriptor()) {
18041            throw new java.lang.IllegalArgumentException(
18042              "Service.getRequestPrototype() given method " +
18043              "descriptor for wrong service type.");
18044          }
18045          switch(method.getIndex()) {
18046            case 0:
18047              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto.getDefaultInstance();
18048            case 1:
18049              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto.getDefaultInstance();
18050            case 2:
18051              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto.getDefaultInstance();
18052            case 3:
18053              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto.getDefaultInstance();
18054            case 4:
18055              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto.getDefaultInstance();
18056            case 5:
18057              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto.getDefaultInstance();
18058            case 6:
18059              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto.getDefaultInstance();
18060            case 7:
18061              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto.getDefaultInstance();
18062            case 8:
18063              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto.getDefaultInstance();
18064            case 9:
18065              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto.getDefaultInstance();
18066            case 10:
18067              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto.getDefaultInstance();
18068            case 11:
18069              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto.getDefaultInstance();
18070            default:
18071              throw new java.lang.AssertionError("Can't get here.");
18072          }
18073        }
18074    
18075        public final com.google.protobuf.Message
18076            getResponsePrototype(
18077            com.google.protobuf.Descriptors.MethodDescriptor method) {
18078          if (method.getService() != getDescriptor()) {
18079            throw new java.lang.IllegalArgumentException(
18080              "Service.getResponsePrototype() given method " +
18081              "descriptor for wrong service type.");
18082          }
18083          switch(method.getIndex()) {
18084            case 0:
18085              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto.getDefaultInstance();
18086            case 1:
18087              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto.getDefaultInstance();
18088            case 2:
18089              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto.getDefaultInstance();
18090            case 3:
18091              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto.getDefaultInstance();
18092            case 4:
18093              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto.getDefaultInstance();
18094            case 5:
18095              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto.getDefaultInstance();
18096            case 6:
18097              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto.getDefaultInstance();
18098            case 7:
18099              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto.getDefaultInstance();
18100            case 8:
18101              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto.getDefaultInstance();
18102            case 9:
18103              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto.getDefaultInstance();
18104            case 10:
18105              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto.getDefaultInstance();
18106            case 11:
18107              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto.getDefaultInstance();
18108            default:
18109              throw new java.lang.AssertionError("Can't get here.");
18110          }
18111        }
18112    
18113        public static Stub newStub(
18114            com.google.protobuf.RpcChannel channel) {
18115          return new Stub(channel);
18116        }
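             // Hedged usage sketch (added, not generated): driving the
             // asynchronous stub. The `channel`, `controller`, and `request`
             // objects are assumptions supplied by the caller's RPC layer,
             // not defined in this file:
             //
             //   QJournalProtocolService service =
             //       QJournalProtocolService.newStub(channel);
             //   service.isFormatted(controller, request,
             //       new com.google.protobuf.RpcCallback<IsFormattedResponseProto>() {
             //         public void run(IsFormattedResponseProto response) {
             //           // invoked once the call completes
             //         }
             //       });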
18117    
18118        public static final class Stub extends org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.QJournalProtocolService implements Interface {
18119          private Stub(com.google.protobuf.RpcChannel channel) {
18120            this.channel = channel;
18121          }
18122    
18123          private final com.google.protobuf.RpcChannel channel;
18124    
18125          public com.google.protobuf.RpcChannel getChannel() {
18126            return channel;
18127          }
18128    
18129          public void isFormatted(
18130              com.google.protobuf.RpcController controller,
18131              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto request,
18132              com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto> done) {
18133            channel.callMethod(
18134              getDescriptor().getMethods().get(0),
18135              controller,
18136              request,
18137              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto.getDefaultInstance(),
18138              com.google.protobuf.RpcUtil.generalizeCallback(
18139                done,
18140                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto.class,
18141                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto.getDefaultInstance()));
18142          }
18143    
18144          public void getJournalState(
18145              com.google.protobuf.RpcController controller,
18146              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto request,
18147              com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto> done) {
18148            channel.callMethod(
18149              getDescriptor().getMethods().get(1),
18150              controller,
18151              request,
18152              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto.getDefaultInstance(),
18153              com.google.protobuf.RpcUtil.generalizeCallback(
18154                done,
18155                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto.class,
18156                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto.getDefaultInstance()));
18157          }
18158    
18159          public void newEpoch(
18160              com.google.protobuf.RpcController controller,
18161              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto request,
18162              com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto> done) {
18163            channel.callMethod(
18164              getDescriptor().getMethods().get(2),
18165              controller,
18166              request,
18167              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto.getDefaultInstance(),
18168              com.google.protobuf.RpcUtil.generalizeCallback(
18169                done,
18170                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto.class,
18171                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto.getDefaultInstance()));
18172          }
18173    
18174          public void format(
18175              com.google.protobuf.RpcController controller,
18176              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto request,
18177              com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto> done) {
18178            channel.callMethod(
18179              getDescriptor().getMethods().get(3),
18180              controller,
18181              request,
18182              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto.getDefaultInstance(),
18183              com.google.protobuf.RpcUtil.generalizeCallback(
18184                done,
18185                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto.class,
18186                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto.getDefaultInstance()));
18187          }
18188    
18189          public void journal(
18190              com.google.protobuf.RpcController controller,
18191              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto request,
18192              com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto> done) {
18193            channel.callMethod(
18194              getDescriptor().getMethods().get(4),
18195              controller,
18196              request,
18197              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto.getDefaultInstance(),
18198              com.google.protobuf.RpcUtil.generalizeCallback(
18199                done,
18200                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto.class,
18201                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto.getDefaultInstance()));
18202          }
18203    
18204          public void heartbeat(
18205              com.google.protobuf.RpcController controller,
18206              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto request,
18207              com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto> done) {
18208            channel.callMethod(
18209              getDescriptor().getMethods().get(5),
18210              controller,
18211              request,
18212              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto.getDefaultInstance(),
18213              com.google.protobuf.RpcUtil.generalizeCallback(
18214                done,
18215                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto.class,
18216                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto.getDefaultInstance()));
18217          }
18218    
18219          public void startLogSegment(
18220              com.google.protobuf.RpcController controller,
18221              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto request,
18222              com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto> done) {
18223            channel.callMethod(
18224              getDescriptor().getMethods().get(6),
18225              controller,
18226              request,
18227              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto.getDefaultInstance(),
18228              com.google.protobuf.RpcUtil.generalizeCallback(
18229                done,
18230                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto.class,
18231                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto.getDefaultInstance()));
18232          }
18233    
18234          public void finalizeLogSegment(
18235              com.google.protobuf.RpcController controller,
18236              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto request,
18237              com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto> done) {
18238            channel.callMethod(
18239              getDescriptor().getMethods().get(7),
18240              controller,
18241              request,
18242              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto.getDefaultInstance(),
18243              com.google.protobuf.RpcUtil.generalizeCallback(
18244                done,
18245                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto.class,
18246                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto.getDefaultInstance()));
18247          }
18248    
18249          public void purgeLogs(
18250              com.google.protobuf.RpcController controller,
18251              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto request,
18252              com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto> done) {
18253            channel.callMethod(
18254              getDescriptor().getMethods().get(8),
18255              controller,
18256              request,
18257              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto.getDefaultInstance(),
18258              com.google.protobuf.RpcUtil.generalizeCallback(
18259                done,
18260                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto.class,
18261                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto.getDefaultInstance()));
18262          }
18263    
18264          public void getEditLogManifest(
18265              com.google.protobuf.RpcController controller,
18266              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto request,
18267              com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto> done) {
18268            channel.callMethod(
18269              getDescriptor().getMethods().get(9),
18270              controller,
18271              request,
18272              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto.getDefaultInstance(),
18273              com.google.protobuf.RpcUtil.generalizeCallback(
18274                done,
18275                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto.class,
18276                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto.getDefaultInstance()));
18277          }
18278    
18279          public void prepareRecovery(
18280              com.google.protobuf.RpcController controller,
18281              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto request,
18282              com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto> done) {
18283            channel.callMethod(
18284              getDescriptor().getMethods().get(10),
18285              controller,
18286              request,
18287              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto.getDefaultInstance(),
18288              com.google.protobuf.RpcUtil.generalizeCallback(
18289                done,
18290                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto.class,
18291                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto.getDefaultInstance()));
18292          }
18293    
18294          public void acceptRecovery(
18295              com.google.protobuf.RpcController controller,
18296              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto request,
18297              com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto> done) {
18298            channel.callMethod(
18299              getDescriptor().getMethods().get(11),
18300              controller,
18301              request,
18302              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto.getDefaultInstance(),
18303              com.google.protobuf.RpcUtil.generalizeCallback(
18304                done,
18305                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto.class,
18306                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto.getDefaultInstance()));
18307          }
18308        }
18309    
18310        public static BlockingInterface newBlockingStub(
18311            com.google.protobuf.BlockingRpcChannel channel) {
18312          return new BlockingStub(channel);
18313        }
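             // Hedged usage sketch (added, not generated): driving the blocking
             // stub. `blockingChannel` is an assumption from the caller's RPC
             // layer, and the journal identifier value is illustrative only:
             //
             //   QJournalProtocolService.BlockingInterface proxy =
             //       QJournalProtocolService.newBlockingStub(blockingChannel);
             //   IsFormattedRequestProto req = IsFormattedRequestProto.newBuilder()
             //       .setJid(JournalIdProto.newBuilder().setIdentifier("journal-1"))
             //       .build();
             //   // throws com.google.protobuf.ServiceException on RPC failure
             //   IsFormattedResponseProto resp = proxy.isFormatted(null, req);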
18314    
18315        public interface BlockingInterface {
18316          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto isFormatted(
18317              com.google.protobuf.RpcController controller,
18318              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto request)
18319              throws com.google.protobuf.ServiceException;
18320    
18321          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto getJournalState(
18322              com.google.protobuf.RpcController controller,
18323              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto request)
18324              throws com.google.protobuf.ServiceException;
18325    
18326          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto newEpoch(
18327              com.google.protobuf.RpcController controller,
18328              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto request)
18329              throws com.google.protobuf.ServiceException;
18330    
18331          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto format(
18332              com.google.protobuf.RpcController controller,
18333              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto request)
18334              throws com.google.protobuf.ServiceException;
18335    
18336          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto journal(
18337              com.google.protobuf.RpcController controller,
18338              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto request)
18339              throws com.google.protobuf.ServiceException;
18340    
18341          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto heartbeat(
18342              com.google.protobuf.RpcController controller,
18343              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto request)
18344              throws com.google.protobuf.ServiceException;
18345    
18346          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto startLogSegment(
18347              com.google.protobuf.RpcController controller,
18348              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto request)
18349              throws com.google.protobuf.ServiceException;
18350    
18351          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto finalizeLogSegment(
18352              com.google.protobuf.RpcController controller,
18353              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto request)
18354              throws com.google.protobuf.ServiceException;
18355    
18356          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto purgeLogs(
18357              com.google.protobuf.RpcController controller,
18358              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto request)
18359              throws com.google.protobuf.ServiceException;
18360    
18361          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto getEditLogManifest(
18362              com.google.protobuf.RpcController controller,
18363              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto request)
18364              throws com.google.protobuf.ServiceException;
18365    
18366          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto prepareRecovery(
18367              com.google.protobuf.RpcController controller,
18368              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto request)
18369              throws com.google.protobuf.ServiceException;
18370    
18371          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto acceptRecovery(
18372              com.google.protobuf.RpcController controller,
18373              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto request)
18374              throws com.google.protobuf.ServiceException;
18375        }
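             // Hedged sketch (added, not generated): a server-side skeleton for
             // the interface above. `JournalNodeImpl` and the constant response
             // are illustrative assumptions; a real implementation answers all
             // twelve rpcs from journal state and is typically wrapped with
             // newReflectiveBlockingService(...) for the RPC server:
             //
             //   class JournalNodeImpl implements QJournalProtocolService.BlockingInterface {
             //     public IsFormattedResponseProto isFormatted(
             //         com.google.protobuf.RpcController controller,
             //         IsFormattedRequestProto request) {
             //       return IsFormattedResponseProto.newBuilder()
             //           .setIsFormatted(true)
             //           .build();
             //     }
             //     // ... remaining eleven rpcs omitted for brevity ...
             //   }
             //
             //   com.google.protobuf.BlockingService service =
             //       QJournalProtocolService.newReflectiveBlockingService(new JournalNodeImpl());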
18376    
18377        private static final class BlockingStub implements BlockingInterface {
18378          private BlockingStub(com.google.protobuf.BlockingRpcChannel channel) {
18379            this.channel = channel;
18380          }
18381    
18382          private final com.google.protobuf.BlockingRpcChannel channel;
18383    
18384          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto isFormatted(
18385              com.google.protobuf.RpcController controller,
18386              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto request)
18387              throws com.google.protobuf.ServiceException {
18388            return (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto) channel.callBlockingMethod(
18389              getDescriptor().getMethods().get(0),
18390              controller,
18391              request,
18392              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto.getDefaultInstance());
18393          }
18394    
18395    
18396          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto getJournalState(
18397              com.google.protobuf.RpcController controller,
18398              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto request)
18399              throws com.google.protobuf.ServiceException {
18400            return (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto) channel.callBlockingMethod(
18401              getDescriptor().getMethods().get(1),
18402              controller,
18403              request,
18404              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto.getDefaultInstance());
18405          }
18406    
18407    
18408          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto newEpoch(
18409              com.google.protobuf.RpcController controller,
18410              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto request)
18411              throws com.google.protobuf.ServiceException {
18412            return (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto) channel.callBlockingMethod(
18413              getDescriptor().getMethods().get(2),
18414              controller,
18415              request,
18416              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto.getDefaultInstance());
18417          }
18418    
18419    
18420          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto format(
18421              com.google.protobuf.RpcController controller,
18422              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto request)
18423              throws com.google.protobuf.ServiceException {
18424            return (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto) channel.callBlockingMethod(
18425              getDescriptor().getMethods().get(3),
18426              controller,
18427              request,
18428              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto.getDefaultInstance());
18429          }
18430    
18431    
18432          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto journal(
18433              com.google.protobuf.RpcController controller,
18434              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto request)
18435              throws com.google.protobuf.ServiceException {
18436            return (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto) channel.callBlockingMethod(
18437              getDescriptor().getMethods().get(4),
18438              controller,
18439              request,
18440              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto.getDefaultInstance());
18441          }
18442    
18443    
18444          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto heartbeat(
18445              com.google.protobuf.RpcController controller,
18446              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto request)
18447              throws com.google.protobuf.ServiceException {
18448            return (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto) channel.callBlockingMethod(
18449              getDescriptor().getMethods().get(5),
18450              controller,
18451              request,
18452              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto.getDefaultInstance());
18453          }
18454    
18455    
18456          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto startLogSegment(
18457              com.google.protobuf.RpcController controller,
18458              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto request)
18459              throws com.google.protobuf.ServiceException {
18460            return (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto) channel.callBlockingMethod(
18461              getDescriptor().getMethods().get(6),
18462              controller,
18463              request,
18464              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto.getDefaultInstance());
18465          }
18466    
18467    
18468          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto finalizeLogSegment(
18469              com.google.protobuf.RpcController controller,
18470              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto request)
18471              throws com.google.protobuf.ServiceException {
18472            return (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto) channel.callBlockingMethod(
18473              getDescriptor().getMethods().get(7),
18474              controller,
18475              request,
18476              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto.getDefaultInstance());
18477          }
18478    
18479    
18480          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto purgeLogs(
18481              com.google.protobuf.RpcController controller,
18482              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto request)
18483              throws com.google.protobuf.ServiceException {
18484            return (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto) channel.callBlockingMethod(
18485              getDescriptor().getMethods().get(8),
18486              controller,
18487              request,
18488              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto.getDefaultInstance());
18489          }
18490    
18491    
18492          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto getEditLogManifest(
18493              com.google.protobuf.RpcController controller,
18494              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto request)
18495              throws com.google.protobuf.ServiceException {
18496            return (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto) channel.callBlockingMethod(
18497              getDescriptor().getMethods().get(9),
18498              controller,
18499              request,
18500              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto.getDefaultInstance());
18501          }
18502    
18503    
18504          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto prepareRecovery(
18505              com.google.protobuf.RpcController controller,
18506              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto request)
18507              throws com.google.protobuf.ServiceException {
18508            return (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto) channel.callBlockingMethod(
18509              getDescriptor().getMethods().get(10),
18510              controller,
18511              request,
18512              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto.getDefaultInstance());
18513          }
18514    
18515    
18516          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto acceptRecovery(
18517              com.google.protobuf.RpcController controller,
18518              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto request)
18519              throws com.google.protobuf.ServiceException {
18520            return (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto) channel.callBlockingMethod(
18521              getDescriptor().getMethods().get(11),
18522              controller,
18523              request,
18524              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto.getDefaultInstance());
18525          }
18526    
18527        }
18528    
18529        // @@protoc_insertion_point(class_scope:hadoop.hdfs.QJournalProtocolService)
18530      }
18531    
18532      private static com.google.protobuf.Descriptors.Descriptor
18533        internal_static_hadoop_hdfs_JournalIdProto_descriptor;
18534      private static
18535        com.google.protobuf.GeneratedMessage.FieldAccessorTable
18536          internal_static_hadoop_hdfs_JournalIdProto_fieldAccessorTable;
18537      private static com.google.protobuf.Descriptors.Descriptor
18538        internal_static_hadoop_hdfs_RequestInfoProto_descriptor;
18539      private static
18540        com.google.protobuf.GeneratedMessage.FieldAccessorTable
18541          internal_static_hadoop_hdfs_RequestInfoProto_fieldAccessorTable;
18542      private static com.google.protobuf.Descriptors.Descriptor
18543        internal_static_hadoop_hdfs_SegmentStateProto_descriptor;
18544      private static
18545        com.google.protobuf.GeneratedMessage.FieldAccessorTable
18546          internal_static_hadoop_hdfs_SegmentStateProto_fieldAccessorTable;
18547      private static com.google.protobuf.Descriptors.Descriptor
18548        internal_static_hadoop_hdfs_PersistedRecoveryPaxosData_descriptor;
18549      private static
18550        com.google.protobuf.GeneratedMessage.FieldAccessorTable
18551          internal_static_hadoop_hdfs_PersistedRecoveryPaxosData_fieldAccessorTable;
18552      private static com.google.protobuf.Descriptors.Descriptor
18553        internal_static_hadoop_hdfs_JournalRequestProto_descriptor;
18554      private static
18555        com.google.protobuf.GeneratedMessage.FieldAccessorTable
18556          internal_static_hadoop_hdfs_JournalRequestProto_fieldAccessorTable;
18557      private static com.google.protobuf.Descriptors.Descriptor
18558        internal_static_hadoop_hdfs_JournalResponseProto_descriptor;
18559      private static
18560        com.google.protobuf.GeneratedMessage.FieldAccessorTable
18561          internal_static_hadoop_hdfs_JournalResponseProto_fieldAccessorTable;
18562      private static com.google.protobuf.Descriptors.Descriptor
18563        internal_static_hadoop_hdfs_HeartbeatRequestProto_descriptor;
18564      private static
18565        com.google.protobuf.GeneratedMessage.FieldAccessorTable
18566          internal_static_hadoop_hdfs_HeartbeatRequestProto_fieldAccessorTable;
18567      private static com.google.protobuf.Descriptors.Descriptor
18568        internal_static_hadoop_hdfs_HeartbeatResponseProto_descriptor;
18569      private static
18570        com.google.protobuf.GeneratedMessage.FieldAccessorTable
18571          internal_static_hadoop_hdfs_HeartbeatResponseProto_fieldAccessorTable;
18572      private static com.google.protobuf.Descriptors.Descriptor
18573        internal_static_hadoop_hdfs_StartLogSegmentRequestProto_descriptor;
18574      private static
18575        com.google.protobuf.GeneratedMessage.FieldAccessorTable
18576          internal_static_hadoop_hdfs_StartLogSegmentRequestProto_fieldAccessorTable;
18577      private static com.google.protobuf.Descriptors.Descriptor
18578        internal_static_hadoop_hdfs_StartLogSegmentResponseProto_descriptor;
18579      private static
18580        com.google.protobuf.GeneratedMessage.FieldAccessorTable
18581          internal_static_hadoop_hdfs_StartLogSegmentResponseProto_fieldAccessorTable;
18582      private static com.google.protobuf.Descriptors.Descriptor
18583        internal_static_hadoop_hdfs_FinalizeLogSegmentRequestProto_descriptor;
18584      private static
18585        com.google.protobuf.GeneratedMessage.FieldAccessorTable
18586          internal_static_hadoop_hdfs_FinalizeLogSegmentRequestProto_fieldAccessorTable;
18587      private static com.google.protobuf.Descriptors.Descriptor
18588        internal_static_hadoop_hdfs_FinalizeLogSegmentResponseProto_descriptor;
18589      private static
18590        com.google.protobuf.GeneratedMessage.FieldAccessorTable
18591          internal_static_hadoop_hdfs_FinalizeLogSegmentResponseProto_fieldAccessorTable;
18592      private static com.google.protobuf.Descriptors.Descriptor
18593        internal_static_hadoop_hdfs_PurgeLogsRequestProto_descriptor;
18594      private static
18595        com.google.protobuf.GeneratedMessage.FieldAccessorTable
18596          internal_static_hadoop_hdfs_PurgeLogsRequestProto_fieldAccessorTable;
18597      private static com.google.protobuf.Descriptors.Descriptor
18598        internal_static_hadoop_hdfs_PurgeLogsResponseProto_descriptor;
18599      private static
18600        com.google.protobuf.GeneratedMessage.FieldAccessorTable
18601          internal_static_hadoop_hdfs_PurgeLogsResponseProto_fieldAccessorTable;
18602      private static com.google.protobuf.Descriptors.Descriptor
18603        internal_static_hadoop_hdfs_IsFormattedRequestProto_descriptor;
18604      private static
18605        com.google.protobuf.GeneratedMessage.FieldAccessorTable
18606          internal_static_hadoop_hdfs_IsFormattedRequestProto_fieldAccessorTable;
18607      private static com.google.protobuf.Descriptors.Descriptor
18608        internal_static_hadoop_hdfs_IsFormattedResponseProto_descriptor;
18609      private static
18610        com.google.protobuf.GeneratedMessage.FieldAccessorTable
18611          internal_static_hadoop_hdfs_IsFormattedResponseProto_fieldAccessorTable;
18612      private static com.google.protobuf.Descriptors.Descriptor
18613        internal_static_hadoop_hdfs_GetJournalStateRequestProto_descriptor;
18614      private static
18615        com.google.protobuf.GeneratedMessage.FieldAccessorTable
18616          internal_static_hadoop_hdfs_GetJournalStateRequestProto_fieldAccessorTable;
18617      private static com.google.protobuf.Descriptors.Descriptor
18618        internal_static_hadoop_hdfs_GetJournalStateResponseProto_descriptor;
18619      private static
18620        com.google.protobuf.GeneratedMessage.FieldAccessorTable
18621          internal_static_hadoop_hdfs_GetJournalStateResponseProto_fieldAccessorTable;
18622      private static com.google.protobuf.Descriptors.Descriptor
18623        internal_static_hadoop_hdfs_FormatRequestProto_descriptor;
18624      private static
18625        com.google.protobuf.GeneratedMessage.FieldAccessorTable
18626          internal_static_hadoop_hdfs_FormatRequestProto_fieldAccessorTable;
18627      private static com.google.protobuf.Descriptors.Descriptor
18628        internal_static_hadoop_hdfs_FormatResponseProto_descriptor;
18629      private static
18630        com.google.protobuf.GeneratedMessage.FieldAccessorTable
18631          internal_static_hadoop_hdfs_FormatResponseProto_fieldAccessorTable;
18632      private static com.google.protobuf.Descriptors.Descriptor
18633        internal_static_hadoop_hdfs_NewEpochRequestProto_descriptor;
18634      private static
18635        com.google.protobuf.GeneratedMessage.FieldAccessorTable
18636          internal_static_hadoop_hdfs_NewEpochRequestProto_fieldAccessorTable;
18637      private static com.google.protobuf.Descriptors.Descriptor
18638        internal_static_hadoop_hdfs_NewEpochResponseProto_descriptor;
18639      private static
18640        com.google.protobuf.GeneratedMessage.FieldAccessorTable
18641          internal_static_hadoop_hdfs_NewEpochResponseProto_fieldAccessorTable;
18642      private static com.google.protobuf.Descriptors.Descriptor
18643        internal_static_hadoop_hdfs_GetEditLogManifestRequestProto_descriptor;
18644      private static
18645        com.google.protobuf.GeneratedMessage.FieldAccessorTable
18646          internal_static_hadoop_hdfs_GetEditLogManifestRequestProto_fieldAccessorTable;
18647      private static com.google.protobuf.Descriptors.Descriptor
18648        internal_static_hadoop_hdfs_GetEditLogManifestResponseProto_descriptor;
18649      private static
18650        com.google.protobuf.GeneratedMessage.FieldAccessorTable
18651          internal_static_hadoop_hdfs_GetEditLogManifestResponseProto_fieldAccessorTable;
18652      private static com.google.protobuf.Descriptors.Descriptor
18653        internal_static_hadoop_hdfs_PrepareRecoveryRequestProto_descriptor;
18654      private static
18655        com.google.protobuf.GeneratedMessage.FieldAccessorTable
18656          internal_static_hadoop_hdfs_PrepareRecoveryRequestProto_fieldAccessorTable;
18657      private static com.google.protobuf.Descriptors.Descriptor
18658        internal_static_hadoop_hdfs_PrepareRecoveryResponseProto_descriptor;
18659      private static
18660        com.google.protobuf.GeneratedMessage.FieldAccessorTable
18661          internal_static_hadoop_hdfs_PrepareRecoveryResponseProto_fieldAccessorTable;
18662      private static com.google.protobuf.Descriptors.Descriptor
18663        internal_static_hadoop_hdfs_AcceptRecoveryRequestProto_descriptor;
18664      private static
18665        com.google.protobuf.GeneratedMessage.FieldAccessorTable
18666          internal_static_hadoop_hdfs_AcceptRecoveryRequestProto_fieldAccessorTable;
18667      private static com.google.protobuf.Descriptors.Descriptor
18668        internal_static_hadoop_hdfs_AcceptRecoveryResponseProto_descriptor;
18669      private static
18670        com.google.protobuf.GeneratedMessage.FieldAccessorTable
18671          internal_static_hadoop_hdfs_AcceptRecoveryResponseProto_fieldAccessorTable;
18672    
18673      public static com.google.protobuf.Descriptors.FileDescriptor
18674          getDescriptor() {
18675        return descriptor;
18676      }
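           // Hedged sketch (added, not generated): the FileDescriptor returned
           // above is assembled from `descriptorData` in the static initializer
           // below and can be inspected reflectively, e.g. to list the rpc
           // names of this file's single service:
           //
           //   for (com.google.protobuf.Descriptors.MethodDescriptor m :
           //        QJournalProtocolProtos.getDescriptor().getServices().get(0).getMethods()) {
           //     System.out.println(m.getName());
           //   }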
18677      private static com.google.protobuf.Descriptors.FileDescriptor
18678          descriptor;
18679      static {
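    // descriptorData holds the serialized FileDescriptorProto for
    // QJournalProtocol.proto, written out as octal-escaped string
    // literals. protoc splits the bytes into chunks (reassembled by
    // internalBuildGeneratedFileFrom() at class-load time) to stay under
    // the JVM's limit on string constants; the trailing bytes encode the
    // file options (java_package, the QJournalProtocolProtos outer
    // classname, and the code-generation flags).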
    java.lang.String[] descriptorData = {
      "\n\026QJournalProtocol.proto\022\013hadoop.hdfs\032\nh" +
      "dfs.proto\"$\n\016JournalIdProto\022\022\n\nidentifie" +
      "r\030\001 \002(\t\"\201\001\n\020RequestInfoProto\022.\n\tjournalI" +
      "d\030\001 \002(\0132\033.hadoop.hdfs.JournalIdProto\022\r\n\005" +
      "epoch\030\002 \002(\004\022\027\n\017ipcSerialNumber\030\003 \002(\004\022\025\n\r" +
      "committedTxId\030\004 \001(\004\"M\n\021SegmentStateProto" +
      "\022\021\n\tstartTxId\030\001 \002(\004\022\017\n\007endTxId\030\002 \002(\004\022\024\n\014" +
      "isInProgress\030\003 \002(\010\"k\n\032PersistedRecoveryP" +
      "axosData\0224\n\014segmentState\030\001 \002(\0132\036.hadoop." +
      "hdfs.SegmentStateProto\022\027\n\017acceptedInEpoc",
      "h\030\002 \002(\004\"\221\001\n\023JournalRequestProto\022.\n\007reqIn" +
      "fo\030\001 \002(\0132\035.hadoop.hdfs.RequestInfoProto\022" +
      "\022\n\nfirstTxnId\030\002 \002(\004\022\017\n\007numTxns\030\003 \002(\r\022\017\n\007" +
      "records\030\004 \002(\014\022\024\n\014segmentTxnId\030\005 \002(\004\"\026\n\024J" +
      "ournalResponseProto\"G\n\025HeartbeatRequestP" +
      "roto\022.\n\007reqInfo\030\001 \002(\0132\035.hadoop.hdfs.Requ" +
      "estInfoProto\"\030\n\026HeartbeatResponseProto\"[" +
      "\n\033StartLogSegmentRequestProto\022.\n\007reqInfo" +
      "\030\001 \002(\0132\035.hadoop.hdfs.RequestInfoProto\022\014\n" +
      "\004txid\030\002 \002(\004\"\036\n\034StartLogSegmentResponsePr",
      "oto\"t\n\036FinalizeLogSegmentRequestProto\022.\n" +
      "\007reqInfo\030\001 \002(\0132\035.hadoop.hdfs.RequestInfo" +
      "Proto\022\021\n\tstartTxId\030\002 \002(\004\022\017\n\007endTxId\030\003 \002(" +
      "\004\"!\n\037FinalizeLogSegmentResponseProto\"^\n\025" +
      "PurgeLogsRequestProto\022.\n\007reqInfo\030\001 \002(\0132\035" +
      ".hadoop.hdfs.RequestInfoProto\022\025\n\rminTxId" +
      "ToKeep\030\002 \002(\004\"\030\n\026PurgeLogsResponseProto\"C" +
      "\n\027IsFormattedRequestProto\022(\n\003jid\030\001 \002(\0132\033" +
      ".hadoop.hdfs.JournalIdProto\"/\n\030IsFormatt" +
      "edResponseProto\022\023\n\013isFormatted\030\001 \002(\010\"G\n\033",
      "GetJournalStateRequestProto\022(\n\003jid\030\001 \002(\013" +
      "2\033.hadoop.hdfs.JournalIdProto\"\\\n\034GetJour" +
      "nalStateResponseProto\022\031\n\021lastPromisedEpo" +
      "ch\030\001 \002(\004\022\020\n\010httpPort\030\002 \002(\r\022\017\n\007fromURL\030\003 " +
      "\001(\t\"o\n\022FormatRequestProto\022(\n\003jid\030\001 \002(\0132\033" +
      ".hadoop.hdfs.JournalIdProto\022/\n\006nsInfo\030\002 " +
      "\002(\0132\037.hadoop.hdfs.NamespaceInfoProto\"\025\n\023" +
      "FormatResponseProto\"\200\001\n\024NewEpochRequestP" +
      "roto\022(\n\003jid\030\001 \002(\0132\033.hadoop.hdfs.JournalI" +
      "dProto\022/\n\006nsInfo\030\002 \002(\0132\037.hadoop.hdfs.Nam",
      "espaceInfoProto\022\r\n\005epoch\030\003 \002(\004\"0\n\025NewEpo" +
      "chResponseProto\022\027\n\017lastSegmentTxId\030\001 \001(\004" +
      "\"z\n\036GetEditLogManifestRequestProto\022(\n\003ji" +
      "d\030\001 \002(\0132\033.hadoop.hdfs.JournalIdProto\022\021\n\t" +
      "sinceTxId\030\002 \002(\004\022\033\n\014inProgressOk\030\004 \001(\010:\005f" +
      "alse\"\177\n\037GetEditLogManifestResponseProto\022" +
      "9\n\010manifest\030\001 \002(\0132\'.hadoop.hdfs.RemoteEd" +
      "itLogManifestProto\022\020\n\010httpPort\030\002 \002(\r\022\017\n\007" +
      "fromURL\030\003 \001(\t\"b\n\033PrepareRecoveryRequestP" +
      "roto\022.\n\007reqInfo\030\001 \002(\0132\035.hadoop.hdfs.Requ",
      "estInfoProto\022\023\n\013segmentTxId\030\002 \002(\004\"\241\001\n\034Pr" +
      "epareRecoveryResponseProto\0224\n\014segmentSta" +
      "te\030\001 \001(\0132\036.hadoop.hdfs.SegmentStateProto" +
      "\022\027\n\017acceptedInEpoch\030\002 \001(\004\022\027\n\017lastWriterE" +
      "poch\030\003 \002(\004\022\031\n\021lastCommittedTxId\030\004 \001(\004\"\224\001" +
      "\n\032AcceptRecoveryRequestProto\022.\n\007reqInfo\030" +
      "\001 \002(\0132\035.hadoop.hdfs.RequestInfoProto\0225\n\r" +
      "stateToAccept\030\002 \002(\0132\036.hadoop.hdfs.Segmen" +
      "tStateProto\022\017\n\007fromURL\030\003 \002(\t\"\035\n\033AcceptRe" +
      "coveryResponseProto2\220\t\n\027QJournalProtocol",
      "Service\022Z\n\013isFormatted\022$.hadoop.hdfs.IsF" +
      "ormattedRequestProto\032%.hadoop.hdfs.IsFor" +
      "mattedResponseProto\022f\n\017getJournalState\022(" +
      ".hadoop.hdfs.GetJournalStateRequestProto" +
      "\032).hadoop.hdfs.GetJournalStateResponsePr" +
      "oto\022Q\n\010newEpoch\022!.hadoop.hdfs.NewEpochRe" +
      "questProto\032\".hadoop.hdfs.NewEpochRespons" +
      "eProto\022K\n\006format\022\037.hadoop.hdfs.FormatReq" +
      "uestProto\032 .hadoop.hdfs.FormatResponsePr" +
      "oto\022N\n\007journal\022 .hadoop.hdfs.JournalRequ",
      "estProto\032!.hadoop.hdfs.JournalResponsePr" +
      "oto\022T\n\theartbeat\022\".hadoop.hdfs.Heartbeat" +
      "RequestProto\032#.hadoop.hdfs.HeartbeatResp" +
      "onseProto\022f\n\017startLogSegment\022(.hadoop.hd" +
      "fs.StartLogSegmentRequestProto\032).hadoop." +
      "hdfs.StartLogSegmentResponseProto\022o\n\022fin" +
      "alizeLogSegment\022+.hadoop.hdfs.FinalizeLo" +
      "gSegmentRequestProto\032,.hadoop.hdfs.Final" +
      "izeLogSegmentResponseProto\022T\n\tpurgeLogs\022" +
      "\".hadoop.hdfs.PurgeLogsRequestProto\032#.ha",
      "doop.hdfs.PurgeLogsResponseProto\022o\n\022getE" +
      "ditLogManifest\022+.hadoop.hdfs.GetEditLogM" +
      "anifestRequestProto\032,.hadoop.hdfs.GetEdi" +
      "tLogManifestResponseProto\022f\n\017prepareReco" +
      "very\022(.hadoop.hdfs.PrepareRecoveryReques" +
      "tProto\032).hadoop.hdfs.PrepareRecoveryResp" +
      "onseProto\022c\n\016acceptRecovery\022\'.hadoop.hdf" +
      "s.AcceptRecoveryRequestProto\032(.hadoop.hd" +
      "fs.AcceptRecoveryResponseProtoBH\n(org.ap" +
      "ache.hadoop.hdfs.qjournal.protocolB\026QJou",
      "rnalProtocolProtos\210\001\001\240\001\001"
    };
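    // The assigner runs as a callback once the FileDescriptor has been
    // built from descriptorData. It caches each message's Descriptor and
    // wraps it in a FieldAccessorTable, which maps the camel-cased field
    // names listed below to the generated accessor methods used by
    // protobuf reflection.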
    com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
      new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
        public com.google.protobuf.ExtensionRegistry assignDescriptors(
            com.google.protobuf.Descriptors.FileDescriptor root) {
          descriptor = root;
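          // Message descriptors are looked up by index; the indices follow
          // the order in which the messages are declared in
          // QJournalProtocol.proto (JournalIdProto is message 0, and so on).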
          internal_static_hadoop_hdfs_JournalIdProto_descriptor =
            getDescriptor().getMessageTypes().get(0);
          internal_static_hadoop_hdfs_JournalIdProto_fieldAccessorTable = new
            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
              internal_static_hadoop_hdfs_JournalIdProto_descriptor,
              new java.lang.String[] { "Identifier", });
          internal_static_hadoop_hdfs_RequestInfoProto_descriptor =
            getDescriptor().getMessageTypes().get(1);
          internal_static_hadoop_hdfs_RequestInfoProto_fieldAccessorTable = new
            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
              internal_static_hadoop_hdfs_RequestInfoProto_descriptor,
              new java.lang.String[] { "JournalId", "Epoch", "IpcSerialNumber", "CommittedTxId", });
          internal_static_hadoop_hdfs_SegmentStateProto_descriptor =
            getDescriptor().getMessageTypes().get(2);
          internal_static_hadoop_hdfs_SegmentStateProto_fieldAccessorTable = new
            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
              internal_static_hadoop_hdfs_SegmentStateProto_descriptor,
              new java.lang.String[] { "StartTxId", "EndTxId", "IsInProgress", });
          internal_static_hadoop_hdfs_PersistedRecoveryPaxosData_descriptor =
            getDescriptor().getMessageTypes().get(3);
          internal_static_hadoop_hdfs_PersistedRecoveryPaxosData_fieldAccessorTable = new
            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
              internal_static_hadoop_hdfs_PersistedRecoveryPaxosData_descriptor,
              new java.lang.String[] { "SegmentState", "AcceptedInEpoch", });
          internal_static_hadoop_hdfs_JournalRequestProto_descriptor =
            getDescriptor().getMessageTypes().get(4);
          internal_static_hadoop_hdfs_JournalRequestProto_fieldAccessorTable = new
            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
              internal_static_hadoop_hdfs_JournalRequestProto_descriptor,
              new java.lang.String[] { "ReqInfo", "FirstTxnId", "NumTxns", "Records", "SegmentTxnId", });
          internal_static_hadoop_hdfs_JournalResponseProto_descriptor =
            getDescriptor().getMessageTypes().get(5);
          internal_static_hadoop_hdfs_JournalResponseProto_fieldAccessorTable = new
            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
              internal_static_hadoop_hdfs_JournalResponseProto_descriptor,
              new java.lang.String[] { });
          internal_static_hadoop_hdfs_HeartbeatRequestProto_descriptor =
            getDescriptor().getMessageTypes().get(6);
          internal_static_hadoop_hdfs_HeartbeatRequestProto_fieldAccessorTable = new
            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
              internal_static_hadoop_hdfs_HeartbeatRequestProto_descriptor,
              new java.lang.String[] { "ReqInfo", });
          internal_static_hadoop_hdfs_HeartbeatResponseProto_descriptor =
            getDescriptor().getMessageTypes().get(7);
          internal_static_hadoop_hdfs_HeartbeatResponseProto_fieldAccessorTable = new
            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
              internal_static_hadoop_hdfs_HeartbeatResponseProto_descriptor,
              new java.lang.String[] { });
          internal_static_hadoop_hdfs_StartLogSegmentRequestProto_descriptor =
            getDescriptor().getMessageTypes().get(8);
          internal_static_hadoop_hdfs_StartLogSegmentRequestProto_fieldAccessorTable = new
            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
              internal_static_hadoop_hdfs_StartLogSegmentRequestProto_descriptor,
              new java.lang.String[] { "ReqInfo", "Txid", });
          internal_static_hadoop_hdfs_StartLogSegmentResponseProto_descriptor =
            getDescriptor().getMessageTypes().get(9);
          internal_static_hadoop_hdfs_StartLogSegmentResponseProto_fieldAccessorTable = new
            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
              internal_static_hadoop_hdfs_StartLogSegmentResponseProto_descriptor,
              new java.lang.String[] { });
          internal_static_hadoop_hdfs_FinalizeLogSegmentRequestProto_descriptor =
            getDescriptor().getMessageTypes().get(10);
          internal_static_hadoop_hdfs_FinalizeLogSegmentRequestProto_fieldAccessorTable = new
            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
              internal_static_hadoop_hdfs_FinalizeLogSegmentRequestProto_descriptor,
              new java.lang.String[] { "ReqInfo", "StartTxId", "EndTxId", });
          internal_static_hadoop_hdfs_FinalizeLogSegmentResponseProto_descriptor =
            getDescriptor().getMessageTypes().get(11);
          internal_static_hadoop_hdfs_FinalizeLogSegmentResponseProto_fieldAccessorTable = new
            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
              internal_static_hadoop_hdfs_FinalizeLogSegmentResponseProto_descriptor,
              new java.lang.String[] { });
          internal_static_hadoop_hdfs_PurgeLogsRequestProto_descriptor =
            getDescriptor().getMessageTypes().get(12);
          internal_static_hadoop_hdfs_PurgeLogsRequestProto_fieldAccessorTable = new
            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
              internal_static_hadoop_hdfs_PurgeLogsRequestProto_descriptor,
              new java.lang.String[] { "ReqInfo", "MinTxIdToKeep", });
          internal_static_hadoop_hdfs_PurgeLogsResponseProto_descriptor =
            getDescriptor().getMessageTypes().get(13);
          internal_static_hadoop_hdfs_PurgeLogsResponseProto_fieldAccessorTable = new
            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
              internal_static_hadoop_hdfs_PurgeLogsResponseProto_descriptor,
              new java.lang.String[] { });
          internal_static_hadoop_hdfs_IsFormattedRequestProto_descriptor =
            getDescriptor().getMessageTypes().get(14);
          internal_static_hadoop_hdfs_IsFormattedRequestProto_fieldAccessorTable = new
            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
              internal_static_hadoop_hdfs_IsFormattedRequestProto_descriptor,
              new java.lang.String[] { "Jid", });
          internal_static_hadoop_hdfs_IsFormattedResponseProto_descriptor =
            getDescriptor().getMessageTypes().get(15);
          internal_static_hadoop_hdfs_IsFormattedResponseProto_fieldAccessorTable = new
            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
              internal_static_hadoop_hdfs_IsFormattedResponseProto_descriptor,
              new java.lang.String[] { "IsFormatted", });
          internal_static_hadoop_hdfs_GetJournalStateRequestProto_descriptor =
            getDescriptor().getMessageTypes().get(16);
          internal_static_hadoop_hdfs_GetJournalStateRequestProto_fieldAccessorTable = new
            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
              internal_static_hadoop_hdfs_GetJournalStateRequestProto_descriptor,
              new java.lang.String[] { "Jid", });
          internal_static_hadoop_hdfs_GetJournalStateResponseProto_descriptor =
            getDescriptor().getMessageTypes().get(17);
          internal_static_hadoop_hdfs_GetJournalStateResponseProto_fieldAccessorTable = new
            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
              internal_static_hadoop_hdfs_GetJournalStateResponseProto_descriptor,
              new java.lang.String[] { "LastPromisedEpoch", "HttpPort", "FromURL", });
          internal_static_hadoop_hdfs_FormatRequestProto_descriptor =
            getDescriptor().getMessageTypes().get(18);
          internal_static_hadoop_hdfs_FormatRequestProto_fieldAccessorTable = new
            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
              internal_static_hadoop_hdfs_FormatRequestProto_descriptor,
              new java.lang.String[] { "Jid", "NsInfo", });
          internal_static_hadoop_hdfs_FormatResponseProto_descriptor =
            getDescriptor().getMessageTypes().get(19);
          internal_static_hadoop_hdfs_FormatResponseProto_fieldAccessorTable = new
            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
              internal_static_hadoop_hdfs_FormatResponseProto_descriptor,
              new java.lang.String[] { });
          internal_static_hadoop_hdfs_NewEpochRequestProto_descriptor =
            getDescriptor().getMessageTypes().get(20);
          internal_static_hadoop_hdfs_NewEpochRequestProto_fieldAccessorTable = new
            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
              internal_static_hadoop_hdfs_NewEpochRequestProto_descriptor,
              new java.lang.String[] { "Jid", "NsInfo", "Epoch", });
          internal_static_hadoop_hdfs_NewEpochResponseProto_descriptor =
            getDescriptor().getMessageTypes().get(21);
          internal_static_hadoop_hdfs_NewEpochResponseProto_fieldAccessorTable = new
            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
              internal_static_hadoop_hdfs_NewEpochResponseProto_descriptor,
              new java.lang.String[] { "LastSegmentTxId", });
          internal_static_hadoop_hdfs_GetEditLogManifestRequestProto_descriptor =
            getDescriptor().getMessageTypes().get(22);
          internal_static_hadoop_hdfs_GetEditLogManifestRequestProto_fieldAccessorTable = new
            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
              internal_static_hadoop_hdfs_GetEditLogManifestRequestProto_descriptor,
              new java.lang.String[] { "Jid", "SinceTxId", "InProgressOk", });
          internal_static_hadoop_hdfs_GetEditLogManifestResponseProto_descriptor =
            getDescriptor().getMessageTypes().get(23);
          internal_static_hadoop_hdfs_GetEditLogManifestResponseProto_fieldAccessorTable = new
            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
              internal_static_hadoop_hdfs_GetEditLogManifestResponseProto_descriptor,
              new java.lang.String[] { "Manifest", "HttpPort", "FromURL", });
          internal_static_hadoop_hdfs_PrepareRecoveryRequestProto_descriptor =
            getDescriptor().getMessageTypes().get(24);
          internal_static_hadoop_hdfs_PrepareRecoveryRequestProto_fieldAccessorTable = new
            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
              internal_static_hadoop_hdfs_PrepareRecoveryRequestProto_descriptor,
              new java.lang.String[] { "ReqInfo", "SegmentTxId", });
          internal_static_hadoop_hdfs_PrepareRecoveryResponseProto_descriptor =
            getDescriptor().getMessageTypes().get(25);
          internal_static_hadoop_hdfs_PrepareRecoveryResponseProto_fieldAccessorTable = new
            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
              internal_static_hadoop_hdfs_PrepareRecoveryResponseProto_descriptor,
              new java.lang.String[] { "SegmentState", "AcceptedInEpoch", "LastWriterEpoch", "LastCommittedTxId", });
          internal_static_hadoop_hdfs_AcceptRecoveryRequestProto_descriptor =
            getDescriptor().getMessageTypes().get(26);
          internal_static_hadoop_hdfs_AcceptRecoveryRequestProto_fieldAccessorTable = new
            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
              internal_static_hadoop_hdfs_AcceptRecoveryRequestProto_descriptor,
              new java.lang.String[] { "ReqInfo", "StateToAccept", "FromURL", });
          internal_static_hadoop_hdfs_AcceptRecoveryResponseProto_descriptor =
            getDescriptor().getMessageTypes().get(27);
          internal_static_hadoop_hdfs_AcceptRecoveryResponseProto_fieldAccessorTable = new
            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
              internal_static_hadoop_hdfs_AcceptRecoveryResponseProto_descriptor,
              new java.lang.String[] { });
          return null;
        }
      };
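    // Parse the serialized descriptor bytes, resolving the hdfs.proto
    // import against the already-built HdfsProtos file descriptor, then
    // invoke the assigner above to cache the per-message descriptors.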
    com.google.protobuf.Descriptors.FileDescriptor
      .internalBuildGeneratedFileFrom(descriptorData,
        new com.google.protobuf.Descriptors.FileDescriptor[] {
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.getDescriptor(),
        }, assigner);
  }
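
  // Illustrative only (not emitted by protoc): a minimal sketch, assuming
  // protobuf-java 2.5.x, of how the descriptors built above enable
  // reflective field access on a generated message.
  private static java.lang.Object getIdentifierReflectively(JournalIdProto msg) {
    // Look up the 'identifier' field (tag 1) on JournalIdProto's descriptor.
    com.google.protobuf.Descriptors.FieldDescriptor field =
        JournalIdProto.getDescriptor().findFieldByName("identifier");
    // Message.getField(...) returns the value boxed as Object; for a
    // required string field this is the java.lang.String value.
    return msg.getField(field);
  }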

  // @@protoc_insertion_point(outer_class_scope)
}