// Generated by the protocol buffer compiler.  DO NOT EDIT!
// source: QJournalProtocol.proto

package org.apache.hadoop.hdfs.qjournal.protocol;

public final class QJournalProtocolProtos {
  private QJournalProtocolProtos() {}
  public static void registerAllExtensions(
      com.google.protobuf.ExtensionRegistry registry) {
  }
  public interface JournalIdProtoOrBuilder
      extends com.google.protobuf.MessageOrBuilder {

    // required string identifier = 1;
    /**
     * <code>required string identifier = 1;</code>
     */
    boolean hasIdentifier();
    /**
     * <code>required string identifier = 1;</code>
     */
    java.lang.String getIdentifier();
    /**
     * <code>required string identifier = 1;</code>
     */
    com.google.protobuf.ByteString
        getIdentifierBytes();
  }
  /**
   * Protobuf type {@code hadoop.hdfs.qjournal.JournalIdProto}
   */
  public static final class JournalIdProto extends
      com.google.protobuf.GeneratedMessage
      implements JournalIdProtoOrBuilder {
    // Use JournalIdProto.newBuilder() to construct.
    private JournalIdProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
      super(builder);
      this.unknownFields = builder.getUnknownFields();
    }
    private JournalIdProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }

    private static final JournalIdProto defaultInstance;
    public static JournalIdProto getDefaultInstance() {
      return defaultInstance;
    }

    public JournalIdProto getDefaultInstanceForType() {
      return defaultInstance;
    }

    private final com.google.protobuf.UnknownFieldSet unknownFields;
    @java.lang.Override
    public final com.google.protobuf.UnknownFieldSet
        getUnknownFields() {
      return this.unknownFields;
    }
    private JournalIdProto(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      initFields();
      int mutable_bitField0_ = 0;
      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
          com.google.protobuf.UnknownFieldSet.newBuilder();
      try {
        boolean done = false;
        while (!done) {
          int tag = input.readTag();
          switch (tag) {
            case 0:
              done = true;
              break;
            default: {
              if (!parseUnknownField(input, unknownFields,
                                     extensionRegistry, tag)) {
                done = true;
              }
              break;
            }
            case 10: {
              bitField0_ |= 0x00000001;
              identifier_ = input.readBytes();
              break;
            }
          }
        }
      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
        throw e.setUnfinishedMessage(this);
      } catch (java.io.IOException e) {
        throw new com.google.protobuf.InvalidProtocolBufferException(
            e.getMessage()).setUnfinishedMessage(this);
      } finally {
        this.unknownFields = unknownFields.build();
        makeExtensionsImmutable();
      }
    }
    public static final com.google.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_JournalIdProto_descriptor;
    }

    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_JournalIdProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder.class);
    }

    public static com.google.protobuf.Parser<JournalIdProto> PARSER =
        new com.google.protobuf.AbstractParser<JournalIdProto>() {
      public JournalIdProto parsePartialFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws com.google.protobuf.InvalidProtocolBufferException {
        return new JournalIdProto(input, extensionRegistry);
      }
    };

    @java.lang.Override
    public com.google.protobuf.Parser<JournalIdProto> getParserForType() {
      return PARSER;
    }

    private int bitField0_;
    // required string identifier = 1;
    public static final int IDENTIFIER_FIELD_NUMBER = 1;
    private java.lang.Object identifier_;
    /**
     * <code>required string identifier = 1;</code>
     */
    public boolean hasIdentifier() {
      return ((bitField0_ & 0x00000001) == 0x00000001);
    }
    /**
     * <code>required string identifier = 1;</code>
     */
    public java.lang.String getIdentifier() {
      java.lang.Object ref = identifier_;
      if (ref instanceof java.lang.String) {
        return (java.lang.String) ref;
      } else {
        com.google.protobuf.ByteString bs =
            (com.google.protobuf.ByteString) ref;
        java.lang.String s = bs.toStringUtf8();
        if (bs.isValidUtf8()) {
          identifier_ = s;
        }
        return s;
      }
    }
    /**
     * <code>required string identifier = 1;</code>
     */
    public com.google.protobuf.ByteString
        getIdentifierBytes() {
      java.lang.Object ref = identifier_;
      if (ref instanceof java.lang.String) {
        com.google.protobuf.ByteString b =
            com.google.protobuf.ByteString.copyFromUtf8(
                (java.lang.String) ref);
        identifier_ = b;
        return b;
      } else {
        return (com.google.protobuf.ByteString) ref;
      }
    }

    private void initFields() {
      identifier_ = "";
    }
    private byte memoizedIsInitialized = -1;
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized != -1) return isInitialized == 1;

      if (!hasIdentifier()) {
        memoizedIsInitialized = 0;
        return false;
      }
      memoizedIsInitialized = 1;
      return true;
    }

    public void writeTo(com.google.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      getSerializedSize();
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        output.writeBytes(1, getIdentifierBytes());
      }
      getUnknownFields().writeTo(output);
    }

    private int memoizedSerializedSize = -1;
    public int getSerializedSize() {
      int size = memoizedSerializedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        size += com.google.protobuf.CodedOutputStream
          .computeBytesSize(1, getIdentifierBytes());
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSerializedSize = size;
      return size;
    }

    private static final long serialVersionUID = 0L;
    @java.lang.Override
    protected java.lang.Object writeReplace()
        throws java.io.ObjectStreamException {
      return super.writeReplace();
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto) obj;

      boolean result = true;
      result = result && (hasIdentifier() == other.hasIdentifier());
      if (hasIdentifier()) {
        result = result && getIdentifier()
            .equals(other.getIdentifier());
      }
      result = result &&
          getUnknownFields().equals(other.getUnknownFields());
      return result;
    }

    private int memoizedHashCode = 0;
    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptorForType().hashCode();
      if (hasIdentifier()) {
        hash = (37 * hash) + IDENTIFIER_FIELD_NUMBER;
        hash = (53 * hash) + getIdentifier().hashCode();
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto parseFrom(
        com.google.protobuf.ByteString data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto parseFrom(
        com.google.protobuf.ByteString data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto parseFrom(byte[] data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto parseFrom(
        byte[] data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto parseFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto parseDelimitedFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto parseFrom(
        com.google.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto parseFrom(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }

    public static Builder newBuilder() { return Builder.create(); }
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto prototype) {
      return newBuilder().mergeFrom(prototype);
    }
    public Builder toBuilder() { return newBuilder(this); }

    @java.lang.Override
    protected Builder newBuilderForType(
        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * Protobuf type {@code hadoop.hdfs.qjournal.JournalIdProto}
     */
    public static final class Builder extends
        com.google.protobuf.GeneratedMessage.Builder<Builder>
       implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder {
      public static final com.google.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_JournalIdProto_descriptor;
      }

      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_JournalIdProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }

      private Builder(
          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      private void maybeForceBuilderInitialization() {
        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
        }
      }
      private static Builder create() {
        return new Builder();
      }

      public Builder clear() {
        super.clear();
        identifier_ = "";
        bitField0_ = (bitField0_ & ~0x00000001);
        return this;
      }

      public Builder clone() {
        return create().mergeFrom(buildPartial());
      }

      public com.google.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_JournalIdProto_descriptor;
      }

      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
      }

      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto build() {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto buildPartial() {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto(this);
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
          to_bitField0_ |= 0x00000001;
        }
        result.identifier_ = identifier_;
        result.bitField0_ = to_bitField0_;
        onBuilt();
        return result;
      }

      public Builder mergeFrom(com.google.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto) {
          return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto other) {
        if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance()) return this;
        if (other.hasIdentifier()) {
          bitField0_ |= 0x00000001;
          identifier_ = other.identifier_;
          onChanged();
        }
        this.mergeUnknownFields(other.getUnknownFields());
        return this;
      }

      public final boolean isInitialized() {
        if (!hasIdentifier()) {
          return false;
        }
        return true;
      }

      public Builder mergeFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto parsedMessage = null;
        try {
          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
          parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto) e.getUnfinishedMessage();
          throw e;
        } finally {
          if (parsedMessage != null) {
            mergeFrom(parsedMessage);
          }
        }
        return this;
      }
      private int bitField0_;

      // required string identifier = 1;
      private java.lang.Object identifier_ = "";
      /**
       * <code>required string identifier = 1;</code>
       */
      public boolean hasIdentifier() {
        return ((bitField0_ & 0x00000001) == 0x00000001);
      }
      /**
       * <code>required string identifier = 1;</code>
       */
      public java.lang.String getIdentifier() {
        java.lang.Object ref = identifier_;
        if (!(ref instanceof java.lang.String)) {
          java.lang.String s = ((com.google.protobuf.ByteString) ref)
              .toStringUtf8();
          identifier_ = s;
          return s;
        } else {
          return (java.lang.String) ref;
        }
      }
      /**
       * <code>required string identifier = 1;</code>
       */
      public com.google.protobuf.ByteString
          getIdentifierBytes() {
        java.lang.Object ref = identifier_;
        if (ref instanceof String) {
          com.google.protobuf.ByteString b =
              com.google.protobuf.ByteString.copyFromUtf8(
                  (java.lang.String) ref);
          identifier_ = b;
          return b;
        } else {
          return (com.google.protobuf.ByteString) ref;
        }
      }
      /**
       * <code>required string identifier = 1;</code>
       */
      public Builder setIdentifier(
          java.lang.String value) {
        if (value == null) {
          throw new NullPointerException();
        }
        bitField0_ |= 0x00000001;
        identifier_ = value;
        onChanged();
        return this;
      }
      /**
       * <code>required string identifier = 1;</code>
       */
      public Builder clearIdentifier() {
        bitField0_ = (bitField0_ & ~0x00000001);
        identifier_ = getDefaultInstance().getIdentifier();
        onChanged();
        return this;
      }
      /**
       * <code>required string identifier = 1;</code>
       */
      public Builder setIdentifierBytes(
          com.google.protobuf.ByteString value) {
        if (value == null) {
          throw new NullPointerException();
        }
        bitField0_ |= 0x00000001;
        identifier_ = value;
        onChanged();
        return this;
      }

      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.qjournal.JournalIdProto)
    }

    static {
      defaultInstance = new JournalIdProto(true);
      defaultInstance.initFields();
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.qjournal.JournalIdProto)
  }
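
  // Editorial note (not produced by protoc): a minimal usage sketch for the
  // generated JournalIdProto API above, kept as a comment so this generated
  // file still compiles. The identifier value is illustrative only.
  //
  //   JournalIdProto jid = JournalIdProto.newBuilder()
  //       .setIdentifier("edits-journal")  // required; build() throws if unset
  //       .build();
  //   byte[] wire = jid.toByteArray();                         // serialize
  //   JournalIdProto parsed = JournalIdProto.parseFrom(wire);  // uses PARSER above
  //   assert parsed.equals(jid);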

  public interface RequestInfoProtoOrBuilder
      extends com.google.protobuf.MessageOrBuilder {

    // required .hadoop.hdfs.qjournal.JournalIdProto journalId = 1;
    /**
     * <code>required .hadoop.hdfs.qjournal.JournalIdProto journalId = 1;</code>
     */
    boolean hasJournalId();
    /**
     * <code>required .hadoop.hdfs.qjournal.JournalIdProto journalId = 1;</code>
     */
    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJournalId();
    /**
     * <code>required .hadoop.hdfs.qjournal.JournalIdProto journalId = 1;</code>
     */
    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJournalIdOrBuilder();

    // required uint64 epoch = 2;
    /**
     * <code>required uint64 epoch = 2;</code>
     */
    boolean hasEpoch();
    /**
     * <code>required uint64 epoch = 2;</code>
     */
    long getEpoch();

    // required uint64 ipcSerialNumber = 3;
    /**
     * <code>required uint64 ipcSerialNumber = 3;</code>
     */
    boolean hasIpcSerialNumber();
    /**
     * <code>required uint64 ipcSerialNumber = 3;</code>
     */
    long getIpcSerialNumber();

    // optional uint64 committedTxId = 4;
    /**
     * <code>optional uint64 committedTxId = 4;</code>
     *
     * <pre>
     * Whenever a writer makes a request, it informs
     * the node of the latest committed txid. This may
     * be higher than the transaction data included in the
     * request itself, eg in the case that the node has
     * fallen behind.
     * </pre>
     */
    boolean hasCommittedTxId();
    /**
     * <code>optional uint64 committedTxId = 4;</code>
     *
     * <pre>
     * Whenever a writer makes a request, it informs
     * the node of the latest committed txid. This may
     * be higher than the transaction data included in the
     * request itself, eg in the case that the node has
     * fallen behind.
     * </pre>
     */
    long getCommittedTxId();
  }
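
  // Editorial note (not produced by protoc): a hedged sketch of how a writer
  // might populate RequestInfoProto, including the optional committedTxId
  // documented above; kept as a comment so the file still compiles. All
  // field values below are placeholders, not values used by HDFS itself.
  //
  //   RequestInfoProto req = RequestInfoProto.newBuilder()
  //       .setJournalId(JournalIdProto.newBuilder().setIdentifier("edits-journal"))
  //       .setEpoch(5L)              // required: writer's epoch
  //       .setIpcSerialNumber(42L)   // required: per-call serial number
  //       .setCommittedTxId(1000L)   // optional: latest committed txid
  //       .build();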
591      /**
592       * Protobuf type {@code hadoop.hdfs.qjournal.RequestInfoProto}
593       */
594      public static final class RequestInfoProto extends
595          com.google.protobuf.GeneratedMessage
596          implements RequestInfoProtoOrBuilder {
597        // Use RequestInfoProto.newBuilder() to construct.
598        private RequestInfoProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
599          super(builder);
600          this.unknownFields = builder.getUnknownFields();
601        }
602        private RequestInfoProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
603    
604        private static final RequestInfoProto defaultInstance;
605        public static RequestInfoProto getDefaultInstance() {
606          return defaultInstance;
607        }
608    
609        public RequestInfoProto getDefaultInstanceForType() {
610          return defaultInstance;
611        }
612    
613        private final com.google.protobuf.UnknownFieldSet unknownFields;
614        @java.lang.Override
615        public final com.google.protobuf.UnknownFieldSet
616            getUnknownFields() {
617          return this.unknownFields;
618        }
619        private RequestInfoProto(
620            com.google.protobuf.CodedInputStream input,
621            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
622            throws com.google.protobuf.InvalidProtocolBufferException {
623          initFields();
624          int mutable_bitField0_ = 0;
625          com.google.protobuf.UnknownFieldSet.Builder unknownFields =
626              com.google.protobuf.UnknownFieldSet.newBuilder();
627          try {
628            boolean done = false;
629            while (!done) {
630              int tag = input.readTag();
631              switch (tag) {
632                case 0:
633                  done = true;
634                  break;
635                default: {
636                  if (!parseUnknownField(input, unknownFields,
637                                         extensionRegistry, tag)) {
638                    done = true;
639                  }
640                  break;
641                }
642                case 10: {
643                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder subBuilder = null;
644                  if (((bitField0_ & 0x00000001) == 0x00000001)) {
645                    subBuilder = journalId_.toBuilder();
646                  }
647                  journalId_ = input.readMessage(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.PARSER, extensionRegistry);
648                  if (subBuilder != null) {
649                    subBuilder.mergeFrom(journalId_);
650                    journalId_ = subBuilder.buildPartial();
651                  }
652                  bitField0_ |= 0x00000001;
653                  break;
654                }
655                case 16: {
656                  bitField0_ |= 0x00000002;
657                  epoch_ = input.readUInt64();
658                  break;
659                }
660                case 24: {
661                  bitField0_ |= 0x00000004;
662                  ipcSerialNumber_ = input.readUInt64();
663                  break;
664                }
665                case 32: {
666                  bitField0_ |= 0x00000008;
667                  committedTxId_ = input.readUInt64();
668                  break;
669                }
670              }
671            }
672          } catch (com.google.protobuf.InvalidProtocolBufferException e) {
673            throw e.setUnfinishedMessage(this);
674          } catch (java.io.IOException e) {
675            throw new com.google.protobuf.InvalidProtocolBufferException(
676                e.getMessage()).setUnfinishedMessage(this);
677          } finally {
678            this.unknownFields = unknownFields.build();
679            makeExtensionsImmutable();
680          }
681        }
682        public static final com.google.protobuf.Descriptors.Descriptor
683            getDescriptor() {
684          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_RequestInfoProto_descriptor;
685        }
686    
687        protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
688            internalGetFieldAccessorTable() {
689          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_RequestInfoProto_fieldAccessorTable
690              .ensureFieldAccessorsInitialized(
691                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder.class);
692        }
693    
694        public static com.google.protobuf.Parser<RequestInfoProto> PARSER =
695            new com.google.protobuf.AbstractParser<RequestInfoProto>() {
696          public RequestInfoProto parsePartialFrom(
697              com.google.protobuf.CodedInputStream input,
698              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
699              throws com.google.protobuf.InvalidProtocolBufferException {
700            return new RequestInfoProto(input, extensionRegistry);
701          }
702        };
703    
704        @java.lang.Override
705        public com.google.protobuf.Parser<RequestInfoProto> getParserForType() {
706          return PARSER;
707        }
708    
709        private int bitField0_;
710        // required .hadoop.hdfs.qjournal.JournalIdProto journalId = 1;
711        public static final int JOURNALID_FIELD_NUMBER = 1;
712        private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto journalId_;
713        /**
714         * <code>required .hadoop.hdfs.qjournal.JournalIdProto journalId = 1;</code>
715         */
716        public boolean hasJournalId() {
717          return ((bitField0_ & 0x00000001) == 0x00000001);
718        }
719        /**
720         * <code>required .hadoop.hdfs.qjournal.JournalIdProto journalId = 1;</code>
721         */
722        public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJournalId() {
723          return journalId_;
724        }
725        /**
726         * <code>required .hadoop.hdfs.qjournal.JournalIdProto journalId = 1;</code>
727         */
728        public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJournalIdOrBuilder() {
729          return journalId_;
730        }
731    
732        // required uint64 epoch = 2;
733        public static final int EPOCH_FIELD_NUMBER = 2;
734        private long epoch_;
735        /**
736         * <code>required uint64 epoch = 2;</code>
737         */
738        public boolean hasEpoch() {
739          return ((bitField0_ & 0x00000002) == 0x00000002);
740        }
741        /**
742         * <code>required uint64 epoch = 2;</code>
743         */
744        public long getEpoch() {
745          return epoch_;
746        }
747    
748        // required uint64 ipcSerialNumber = 3;
749        public static final int IPCSERIALNUMBER_FIELD_NUMBER = 3;
750        private long ipcSerialNumber_;
751        /**
752         * <code>required uint64 ipcSerialNumber = 3;</code>
753         */
754        public boolean hasIpcSerialNumber() {
755          return ((bitField0_ & 0x00000004) == 0x00000004);
756        }
757        /**
758         * <code>required uint64 ipcSerialNumber = 3;</code>
759         */
760        public long getIpcSerialNumber() {
761          return ipcSerialNumber_;
762        }
763    
764        // optional uint64 committedTxId = 4;
765        public static final int COMMITTEDTXID_FIELD_NUMBER = 4;
766        private long committedTxId_;
767        /**
768         * <code>optional uint64 committedTxId = 4;</code>
769         *
770         * <pre>
771         * Whenever a writer makes a request, it informs
772         * the node of the latest committed txid. This may
773         * be higher than the transaction data included in the
774         * request itself, eg in the case that the node has
775         * fallen behind.
776         * </pre>
777         */
778        public boolean hasCommittedTxId() {
779          return ((bitField0_ & 0x00000008) == 0x00000008);
780        }
781        /**
782         * <code>optional uint64 committedTxId = 4;</code>
783         *
784         * <pre>
785         * Whenever a writer makes a request, it informs
786         * the node of the latest committed txid. This may
787         * be higher than the transaction data included in the
788         * request itself, eg in the case that the node has
789         * fallen behind.
790         * </pre>
791         */
792        public long getCommittedTxId() {
793          return committedTxId_;
794        }
795    
796        private void initFields() {
797          journalId_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
798          epoch_ = 0L;
799          ipcSerialNumber_ = 0L;
800          committedTxId_ = 0L;
801        }
802        private byte memoizedIsInitialized = -1;
803        public final boolean isInitialized() {
804          byte isInitialized = memoizedIsInitialized;
805          if (isInitialized != -1) return isInitialized == 1;
806    
807          if (!hasJournalId()) {
808            memoizedIsInitialized = 0;
809            return false;
810          }
811          if (!hasEpoch()) {
812            memoizedIsInitialized = 0;
813            return false;
814          }
815          if (!hasIpcSerialNumber()) {
816            memoizedIsInitialized = 0;
817            return false;
818          }
819          if (!getJournalId().isInitialized()) {
820            memoizedIsInitialized = 0;
821            return false;
822          }
823          memoizedIsInitialized = 1;
824          return true;
825        }
826    
827        public void writeTo(com.google.protobuf.CodedOutputStream output)
828                            throws java.io.IOException {
829          getSerializedSize();
830          if (((bitField0_ & 0x00000001) == 0x00000001)) {
831            output.writeMessage(1, journalId_);
832          }
833          if (((bitField0_ & 0x00000002) == 0x00000002)) {
834            output.writeUInt64(2, epoch_);
835          }
836          if (((bitField0_ & 0x00000004) == 0x00000004)) {
837            output.writeUInt64(3, ipcSerialNumber_);
838          }
839          if (((bitField0_ & 0x00000008) == 0x00000008)) {
840            output.writeUInt64(4, committedTxId_);
841          }
842          getUnknownFields().writeTo(output);
843        }
844    
845        private int memoizedSerializedSize = -1;
846        public int getSerializedSize() {
847          int size = memoizedSerializedSize;
848          if (size != -1) return size;
849    
850          size = 0;
851          if (((bitField0_ & 0x00000001) == 0x00000001)) {
852            size += com.google.protobuf.CodedOutputStream
853              .computeMessageSize(1, journalId_);
854          }
855          if (((bitField0_ & 0x00000002) == 0x00000002)) {
856            size += com.google.protobuf.CodedOutputStream
857              .computeUInt64Size(2, epoch_);
858          }
859          if (((bitField0_ & 0x00000004) == 0x00000004)) {
860            size += com.google.protobuf.CodedOutputStream
861              .computeUInt64Size(3, ipcSerialNumber_);
862          }
863          if (((bitField0_ & 0x00000008) == 0x00000008)) {
864            size += com.google.protobuf.CodedOutputStream
865              .computeUInt64Size(4, committedTxId_);
866          }
867          size += getUnknownFields().getSerializedSize();
868          memoizedSerializedSize = size;
869          return size;
870        }
871    
872        private static final long serialVersionUID = 0L;
873        @java.lang.Override
874        protected java.lang.Object writeReplace()
875            throws java.io.ObjectStreamException {
876          return super.writeReplace();
877        }
878    
879        @java.lang.Override
880        public boolean equals(final java.lang.Object obj) {
881          if (obj == this) {
882           return true;
883          }
884          if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto)) {
885            return super.equals(obj);
886          }
887          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto) obj;
888    
889          boolean result = true;
890          result = result && (hasJournalId() == other.hasJournalId());
891          if (hasJournalId()) {
892            result = result && getJournalId()
893                .equals(other.getJournalId());
894          }
895          result = result && (hasEpoch() == other.hasEpoch());
896          if (hasEpoch()) {
897            result = result && (getEpoch()
898                == other.getEpoch());
899          }
900          result = result && (hasIpcSerialNumber() == other.hasIpcSerialNumber());
901          if (hasIpcSerialNumber()) {
902            result = result && (getIpcSerialNumber()
903                == other.getIpcSerialNumber());
904          }
905          result = result && (hasCommittedTxId() == other.hasCommittedTxId());
906          if (hasCommittedTxId()) {
907            result = result && (getCommittedTxId()
908                == other.getCommittedTxId());
909          }
910          result = result &&
911              getUnknownFields().equals(other.getUnknownFields());
912          return result;
913        }
914    
915        private int memoizedHashCode = 0;
916        @java.lang.Override
917        public int hashCode() {
918          if (memoizedHashCode != 0) {
919            return memoizedHashCode;
920          }
921          int hash = 41;
922          hash = (19 * hash) + getDescriptorForType().hashCode();
923          if (hasJournalId()) {
924            hash = (37 * hash) + JOURNALID_FIELD_NUMBER;
925            hash = (53 * hash) + getJournalId().hashCode();
926          }
927          if (hasEpoch()) {
928            hash = (37 * hash) + EPOCH_FIELD_NUMBER;
929            hash = (53 * hash) + hashLong(getEpoch());
930          }
931          if (hasIpcSerialNumber()) {
932            hash = (37 * hash) + IPCSERIALNUMBER_FIELD_NUMBER;
933            hash = (53 * hash) + hashLong(getIpcSerialNumber());
934          }
935          if (hasCommittedTxId()) {
936            hash = (37 * hash) + COMMITTEDTXID_FIELD_NUMBER;
937            hash = (53 * hash) + hashLong(getCommittedTxId());
938          }
939          hash = (29 * hash) + getUnknownFields().hashCode();
940          memoizedHashCode = hash;
941          return hash;
942        }
943    
944        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto parseFrom(
945            com.google.protobuf.ByteString data)
946            throws com.google.protobuf.InvalidProtocolBufferException {
947          return PARSER.parseFrom(data);
948        }
949        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto parseFrom(
950            com.google.protobuf.ByteString data,
951            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
952            throws com.google.protobuf.InvalidProtocolBufferException {
953          return PARSER.parseFrom(data, extensionRegistry);
954        }
955        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto parseFrom(byte[] data)
956            throws com.google.protobuf.InvalidProtocolBufferException {
957          return PARSER.parseFrom(data);
958        }
959        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto parseFrom(
960            byte[] data,
961            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
962            throws com.google.protobuf.InvalidProtocolBufferException {
963          return PARSER.parseFrom(data, extensionRegistry);
964        }
965        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto parseFrom(java.io.InputStream input)
966            throws java.io.IOException {
967          return PARSER.parseFrom(input);
968        }
969        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto parseFrom(
970            java.io.InputStream input,
971            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
972            throws java.io.IOException {
973          return PARSER.parseFrom(input, extensionRegistry);
974        }
975        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto parseDelimitedFrom(java.io.InputStream input)
976            throws java.io.IOException {
977          return PARSER.parseDelimitedFrom(input);
978        }
979        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto parseDelimitedFrom(
980            java.io.InputStream input,
981            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
982            throws java.io.IOException {
983          return PARSER.parseDelimitedFrom(input, extensionRegistry);
984        }
985        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto parseFrom(
986            com.google.protobuf.CodedInputStream input)
987            throws java.io.IOException {
988          return PARSER.parseFrom(input);
989        }
990        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto parseFrom(
991            com.google.protobuf.CodedInputStream input,
992            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
993            throws java.io.IOException {
994          return PARSER.parseFrom(input, extensionRegistry);
995        }
996    
997        public static Builder newBuilder() { return Builder.create(); }
998        public Builder newBuilderForType() { return newBuilder(); }
999        public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto prototype) {
1000          return newBuilder().mergeFrom(prototype);
1001        }
1002        public Builder toBuilder() { return newBuilder(this); }
1003    
1004        @java.lang.Override
1005        protected Builder newBuilderForType(
1006            com.google.protobuf.GeneratedMessage.BuilderParent parent) {
1007          Builder builder = new Builder(parent);
1008          return builder;
1009        }
1010        /**
1011         * Protobuf type {@code hadoop.hdfs.qjournal.RequestInfoProto}
1012         */
1013        public static final class Builder extends
1014            com.google.protobuf.GeneratedMessage.Builder<Builder>
1015           implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder {
1016          public static final com.google.protobuf.Descriptors.Descriptor
1017              getDescriptor() {
1018            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_RequestInfoProto_descriptor;
1019          }
1020    
1021          protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
1022              internalGetFieldAccessorTable() {
1023            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_RequestInfoProto_fieldAccessorTable
1024                .ensureFieldAccessorsInitialized(
1025                    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder.class);
1026          }
1027    
1028          // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.newBuilder()
1029          private Builder() {
1030            maybeForceBuilderInitialization();
1031          }
1032    
1033          private Builder(
1034              com.google.protobuf.GeneratedMessage.BuilderParent parent) {
1035            super(parent);
1036            maybeForceBuilderInitialization();
1037          }
1038          private void maybeForceBuilderInitialization() {
1039            if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
1040              getJournalIdFieldBuilder();
1041            }
1042          }
1043          private static Builder create() {
1044            return new Builder();
1045          }
1046    
1047          public Builder clear() {
1048            super.clear();
1049            if (journalIdBuilder_ == null) {
1050              journalId_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
1051            } else {
1052              journalIdBuilder_.clear();
1053            }
1054            bitField0_ = (bitField0_ & ~0x00000001);
1055            epoch_ = 0L;
1056            bitField0_ = (bitField0_ & ~0x00000002);
1057            ipcSerialNumber_ = 0L;
1058            bitField0_ = (bitField0_ & ~0x00000004);
1059            committedTxId_ = 0L;
1060            bitField0_ = (bitField0_ & ~0x00000008);
1061            return this;
1062          }
1063    
1064          public Builder clone() {
1065            return create().mergeFrom(buildPartial());
1066          }
1067    
1068          public com.google.protobuf.Descriptors.Descriptor
1069              getDescriptorForType() {
1070            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_RequestInfoProto_descriptor;
1071          }
1072    
1073          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getDefaultInstanceForType() {
1074            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
1075          }
1076    
1077          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto build() {
1078            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto result = buildPartial();
1079            if (!result.isInitialized()) {
1080              throw newUninitializedMessageException(result);
1081            }
1082            return result;
1083          }
1084    
1085          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto buildPartial() {
1086            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto(this);
1087            int from_bitField0_ = bitField0_;
1088            int to_bitField0_ = 0;
1089            if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
1090              to_bitField0_ |= 0x00000001;
1091            }
1092            if (journalIdBuilder_ == null) {
1093              result.journalId_ = journalId_;
1094            } else {
1095              result.journalId_ = journalIdBuilder_.build();
1096            }
1097            if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
1098              to_bitField0_ |= 0x00000002;
1099            }
1100            result.epoch_ = epoch_;
1101            if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
1102              to_bitField0_ |= 0x00000004;
1103            }
1104            result.ipcSerialNumber_ = ipcSerialNumber_;
1105            if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
1106              to_bitField0_ |= 0x00000008;
1107            }
1108            result.committedTxId_ = committedTxId_;
1109            result.bitField0_ = to_bitField0_;
1110            onBuilt();
1111            return result;
1112          }
1113    
1114          public Builder mergeFrom(com.google.protobuf.Message other) {
1115            if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto) {
1116              return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto)other);
1117            } else {
1118              super.mergeFrom(other);
1119              return this;
1120            }
1121          }
1122    
1123          public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto other) {
1124            if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance()) return this;
1125            if (other.hasJournalId()) {
1126              mergeJournalId(other.getJournalId());
1127            }
1128            if (other.hasEpoch()) {
1129              setEpoch(other.getEpoch());
1130            }
1131            if (other.hasIpcSerialNumber()) {
1132              setIpcSerialNumber(other.getIpcSerialNumber());
1133            }
1134            if (other.hasCommittedTxId()) {
1135              setCommittedTxId(other.getCommittedTxId());
1136            }
1137            this.mergeUnknownFields(other.getUnknownFields());
1138            return this;
1139          }
1140    
1141          public final boolean isInitialized() {
1142            if (!hasJournalId()) {
1143              
1144              return false;
1145            }
1146            if (!hasEpoch()) {
1147              
1148              return false;
1149            }
1150            if (!hasIpcSerialNumber()) {
1151              
1152              return false;
1153            }
1154            if (!getJournalId().isInitialized()) {
1155              
1156              return false;
1157            }
1158            return true;
1159          }
1160    
1161          public Builder mergeFrom(
1162              com.google.protobuf.CodedInputStream input,
1163              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
1164              throws java.io.IOException {
1165            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto parsedMessage = null;
1166            try {
1167              parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
1168            } catch (com.google.protobuf.InvalidProtocolBufferException e) {
1169              parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto) e.getUnfinishedMessage();
1170              throw e;
1171            } finally {
1172              if (parsedMessage != null) {
1173                mergeFrom(parsedMessage);
1174              }
1175            }
1176            return this;
1177          }
1178          private int bitField0_;
1179    
1180          // required .hadoop.hdfs.qjournal.JournalIdProto journalId = 1;
1181          private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto journalId_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
1182          private com.google.protobuf.SingleFieldBuilder<
1183              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder> journalIdBuilder_;
1184          /**
1185           * <code>required .hadoop.hdfs.qjournal.JournalIdProto journalId = 1;</code>
1186           */
1187          public boolean hasJournalId() {
1188            return ((bitField0_ & 0x00000001) == 0x00000001);
1189          }
1190          /**
1191           * <code>required .hadoop.hdfs.qjournal.JournalIdProto journalId = 1;</code>
1192           */
1193          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJournalId() {
1194            if (journalIdBuilder_ == null) {
1195              return journalId_;
1196            } else {
1197              return journalIdBuilder_.getMessage();
1198            }
1199          }
1200          /**
1201           * <code>required .hadoop.hdfs.qjournal.JournalIdProto journalId = 1;</code>
1202           */
1203          public Builder setJournalId(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto value) {
1204            if (journalIdBuilder_ == null) {
1205              if (value == null) {
1206                throw new NullPointerException();
1207              }
1208              journalId_ = value;
1209              onChanged();
1210            } else {
1211              journalIdBuilder_.setMessage(value);
1212            }
1213            bitField0_ |= 0x00000001;
1214            return this;
1215          }
1216          /**
1217           * <code>required .hadoop.hdfs.qjournal.JournalIdProto journalId = 1;</code>
1218           */
1219          public Builder setJournalId(
1220              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder builderForValue) {
1221            if (journalIdBuilder_ == null) {
1222              journalId_ = builderForValue.build();
1223              onChanged();
1224            } else {
1225              journalIdBuilder_.setMessage(builderForValue.build());
1226            }
1227            bitField0_ |= 0x00000001;
1228            return this;
1229          }
1230          /**
1231           * <code>required .hadoop.hdfs.qjournal.JournalIdProto journalId = 1;</code>
1232           */
1233          public Builder mergeJournalId(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto value) {
1234            if (journalIdBuilder_ == null) {
1235              if (((bitField0_ & 0x00000001) == 0x00000001) &&
1236                  journalId_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance()) {
1237                journalId_ =
1238                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.newBuilder(journalId_).mergeFrom(value).buildPartial();
1239              } else {
1240                journalId_ = value;
1241              }
1242              onChanged();
1243            } else {
1244              journalIdBuilder_.mergeFrom(value);
1245            }
1246            bitField0_ |= 0x00000001;
1247            return this;
1248          }
1249          /**
1250           * <code>required .hadoop.hdfs.qjournal.JournalIdProto journalId = 1;</code>
1251           */
1252          public Builder clearJournalId() {
1253            if (journalIdBuilder_ == null) {
1254              journalId_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
1255              onChanged();
1256            } else {
1257              journalIdBuilder_.clear();
1258            }
1259            bitField0_ = (bitField0_ & ~0x00000001);
1260            return this;
1261          }
1262          /**
1263           * <code>required .hadoop.hdfs.qjournal.JournalIdProto journalId = 1;</code>
1264           */
1265          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder getJournalIdBuilder() {
1266            bitField0_ |= 0x00000001;
1267            onChanged();
1268            return getJournalIdFieldBuilder().getBuilder();
1269          }
1270          /**
1271           * <code>required .hadoop.hdfs.qjournal.JournalIdProto journalId = 1;</code>
1272           */
1273          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJournalIdOrBuilder() {
1274            if (journalIdBuilder_ != null) {
1275              return journalIdBuilder_.getMessageOrBuilder();
1276            } else {
1277              return journalId_;
1278            }
1279          }
1280          /**
1281           * <code>required .hadoop.hdfs.qjournal.JournalIdProto journalId = 1;</code>
1282           */
1283          private com.google.protobuf.SingleFieldBuilder<
1284              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder> 
1285              getJournalIdFieldBuilder() {
1286            if (journalIdBuilder_ == null) {
1287              journalIdBuilder_ = new com.google.protobuf.SingleFieldBuilder<
1288                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder>(
1289                      journalId_,
1290                      getParentForChildren(),
1291                      isClean());
1292              journalId_ = null;
1293            }
1294            return journalIdBuilder_;
1295          }
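      // Editor's note (not protoc output): the SingleFieldBuilder above is
      // created lazily. Until something forces it into existence (e.g. a call
      // to getJournalIdBuilder()), the plain journalId_ field holds the value;
      // once the builder exists, journalId_ is nulled and every accessor above
      // (getJournalId(), setJournalId(), mergeJournalId(), clearJournalId())
      // routes through the builder instead, which is why each one branches on
      // journalIdBuilder_ == null.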
1296    
1297          // required uint64 epoch = 2;
1298          private long epoch_ ;
1299          /**
1300           * <code>required uint64 epoch = 2;</code>
1301           */
1302          public boolean hasEpoch() {
1303            return ((bitField0_ & 0x00000002) == 0x00000002);
1304          }
1305          /**
1306           * <code>required uint64 epoch = 2;</code>
1307           */
1308          public long getEpoch() {
1309            return epoch_;
1310          }
1311          /**
1312           * <code>required uint64 epoch = 2;</code>
1313           */
1314          public Builder setEpoch(long value) {
1315            bitField0_ |= 0x00000002;
1316            epoch_ = value;
1317            onChanged();
1318            return this;
1319          }
1320          /**
1321           * <code>required uint64 epoch = 2;</code>
1322           */
1323          public Builder clearEpoch() {
1324            bitField0_ = (bitField0_ & ~0x00000002);
1325            epoch_ = 0L;
1326            onChanged();
1327            return this;
1328          }
1329    
1330          // required uint64 ipcSerialNumber = 3;
1331          private long ipcSerialNumber_ ;
1332          /**
1333           * <code>required uint64 ipcSerialNumber = 3;</code>
1334           */
1335          public boolean hasIpcSerialNumber() {
1336            return ((bitField0_ & 0x00000004) == 0x00000004);
1337          }
1338          /**
1339           * <code>required uint64 ipcSerialNumber = 3;</code>
1340           */
1341          public long getIpcSerialNumber() {
1342            return ipcSerialNumber_;
1343          }
1344          /**
1345           * <code>required uint64 ipcSerialNumber = 3;</code>
1346           */
1347          public Builder setIpcSerialNumber(long value) {
1348            bitField0_ |= 0x00000004;
1349            ipcSerialNumber_ = value;
1350            onChanged();
1351            return this;
1352          }
1353          /**
1354           * <code>required uint64 ipcSerialNumber = 3;</code>
1355           */
1356          public Builder clearIpcSerialNumber() {
1357            bitField0_ = (bitField0_ & ~0x00000004);
1358            ipcSerialNumber_ = 0L;
1359            onChanged();
1360            return this;
1361          }
1362    
1363          // optional uint64 committedTxId = 4;
1364          private long committedTxId_ ;
1365          /**
1366           * <code>optional uint64 committedTxId = 4;</code>
1367           *
1368           * <pre>
1369           * Whenever a writer makes a request, it informs
1370           * the node of the latest committed txid. This may
1371           * be higher than the transaction data included in the
1372           * request itself, e.g., in the case that the node has
1373           * fallen behind.
1374           * </pre>
1375           */
1376          public boolean hasCommittedTxId() {
1377            return ((bitField0_ & 0x00000008) == 0x00000008);
1378          }
1379          /**
1380           * <code>optional uint64 committedTxId = 4;</code>
1381           *
1382           * <pre>
1383           * Whenever a writer makes a request, it informs
1384           * the node of the latest committed txid. This may
1385           * be higher than the transaction data included in the
1386           * request itself, e.g., in the case that the node has
1387           * fallen behind.
1388           * </pre>
1389           */
1390          public long getCommittedTxId() {
1391            return committedTxId_;
1392          }
1393          /**
1394           * <code>optional uint64 committedTxId = 4;</code>
1395           *
1396           * <pre>
1397           * Whenever a writer makes a request, it informs
1398           * the node of the latest committed txid. This may
1399           * be higher than the transaction data included in the
1400           * request itself, e.g., in the case that the node has
1401           * fallen behind.
1402           * </pre>
1403           */
1404          public Builder setCommittedTxId(long value) {
1405            bitField0_ |= 0x00000008;
1406            committedTxId_ = value;
1407            onChanged();
1408            return this;
1409          }
1410          /**
1411           * <code>optional uint64 committedTxId = 4;</code>
1412           *
1413           * <pre>
1414           * Whenever a writer makes a request, it informs
1415           * the node of the latest committed txid. This may
1416           * request itself, e.g., in the case that the node has
1417           * request itself, eg in the case that the node has
1418           * fallen behind.
1419           * </pre>
1420           */
1421          public Builder clearCommittedTxId() {
1422            bitField0_ = (bitField0_ & ~0x00000008);
1423            committedTxId_ = 0L;
1424            onChanged();
1425            return this;
1426          }
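      // Editor's note (not protoc output): a hypothetical use of the
      // committedTxId field described in the javadoc above. A writer that has
      // already committed txid 150 but is shipping a batch that ends at txid
      // 120 to a lagging node might build (all values made up):
      //
      //   RequestInfoProto reqInfo = RequestInfoProto.newBuilder()
      //       .setJournalId(JournalIdProto.newBuilder().setIdentifier("myjournal"))
      //       .setEpoch(3L)
      //       .setIpcSerialNumber(42L)
      //       .setCommittedTxId(150L) // ahead of the txns in this request
      //       .build();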
1427    
1428          // @@protoc_insertion_point(builder_scope:hadoop.hdfs.qjournal.RequestInfoProto)
1429        }
1430    
1431        static {
1432          defaultInstance = new RequestInfoProto(true);
1433          defaultInstance.initFields();
1434        }
1435    
1436        // @@protoc_insertion_point(class_scope:hadoop.hdfs.qjournal.RequestInfoProto)
1437      }
1438    
1439      public interface SegmentStateProtoOrBuilder
1440          extends com.google.protobuf.MessageOrBuilder {
1441    
1442        // required uint64 startTxId = 1;
1443        /**
1444         * <code>required uint64 startTxId = 1;</code>
1445         */
1446        boolean hasStartTxId();
1447        /**
1448         * <code>required uint64 startTxId = 1;</code>
1449         */
1450        long getStartTxId();
1451    
1452        // required uint64 endTxId = 2;
1453        /**
1454         * <code>required uint64 endTxId = 2;</code>
1455         */
1456        boolean hasEndTxId();
1457        /**
1458         * <code>required uint64 endTxId = 2;</code>
1459         */
1460        long getEndTxId();
1461    
1462        // required bool isInProgress = 3;
1463        /**
1464         * <code>required bool isInProgress = 3;</code>
1465         */
1466        boolean hasIsInProgress();
1467        /**
1468         * <code>required bool isInProgress = 3;</code>
1469         */
1470        boolean getIsInProgress();
1471      }
1472      /**
1473       * Protobuf type {@code hadoop.hdfs.qjournal.SegmentStateProto}
1474       */
1475      public static final class SegmentStateProto extends
1476          com.google.protobuf.GeneratedMessage
1477          implements SegmentStateProtoOrBuilder {
1478        // Use SegmentStateProto.newBuilder() to construct.
1479        private SegmentStateProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
1480          super(builder);
1481          this.unknownFields = builder.getUnknownFields();
1482        }
1483        private SegmentStateProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
1484    
1485        private static final SegmentStateProto defaultInstance;
1486        public static SegmentStateProto getDefaultInstance() {
1487          return defaultInstance;
1488        }
1489    
1490        public SegmentStateProto getDefaultInstanceForType() {
1491          return defaultInstance;
1492        }
1493    
1494        private final com.google.protobuf.UnknownFieldSet unknownFields;
1495        @java.lang.Override
1496        public final com.google.protobuf.UnknownFieldSet
1497            getUnknownFields() {
1498          return this.unknownFields;
1499        }
1500        private SegmentStateProto(
1501            com.google.protobuf.CodedInputStream input,
1502            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
1503            throws com.google.protobuf.InvalidProtocolBufferException {
1504          initFields();
1505          int mutable_bitField0_ = 0;
1506          com.google.protobuf.UnknownFieldSet.Builder unknownFields =
1507              com.google.protobuf.UnknownFieldSet.newBuilder();
1508          try {
1509            boolean done = false;
1510            while (!done) {
1511              int tag = input.readTag();
1512              switch (tag) {
1513                case 0:
1514                  done = true;
1515                  break;
1516                default: {
1517                  if (!parseUnknownField(input, unknownFields,
1518                                         extensionRegistry, tag)) {
1519                    done = true;
1520                  }
1521                  break;
1522                }
1523                case 8: {
1524                  bitField0_ |= 0x00000001;
1525                  startTxId_ = input.readUInt64();
1526                  break;
1527                }
1528                case 16: {
1529                  bitField0_ |= 0x00000002;
1530                  endTxId_ = input.readUInt64();
1531                  break;
1532                }
1533                case 24: {
1534                  bitField0_ |= 0x00000004;
1535                  isInProgress_ = input.readBool();
1536                  break;
1537                }
1538              }
1539            }
1540          } catch (com.google.protobuf.InvalidProtocolBufferException e) {
1541            throw e.setUnfinishedMessage(this);
1542          } catch (java.io.IOException e) {
1543            throw new com.google.protobuf.InvalidProtocolBufferException(
1544                e.getMessage()).setUnfinishedMessage(this);
1545          } finally {
1546            this.unknownFields = unknownFields.build();
1547            makeExtensionsImmutable();
1548          }
1549        }
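    // Editor's note (not protoc output): the case labels in the loop above are
    // protobuf wire-format tags, computed as (field_number << 3) | wire_type.
    // So case 8 is field 1 as a varint (startTxId), case 16 is field 2 as a
    // varint (endTxId), and case 24 is field 3 as a varint (isInProgress);
    // case 0 means end-of-stream, and anything else falls through to
    // parseUnknownField().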
1550        public static final com.google.protobuf.Descriptors.Descriptor
1551            getDescriptor() {
1552          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_SegmentStateProto_descriptor;
1553        }
1554    
1555        protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
1556            internalGetFieldAccessorTable() {
1557          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_SegmentStateProto_fieldAccessorTable
1558              .ensureFieldAccessorsInitialized(
1559                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder.class);
1560        }
1561    
1562        public static com.google.protobuf.Parser<SegmentStateProto> PARSER =
1563            new com.google.protobuf.AbstractParser<SegmentStateProto>() {
1564          public SegmentStateProto parsePartialFrom(
1565              com.google.protobuf.CodedInputStream input,
1566              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
1567              throws com.google.protobuf.InvalidProtocolBufferException {
1568            return new SegmentStateProto(input, extensionRegistry);
1569          }
1570        };
1571    
1572        @java.lang.Override
1573        public com.google.protobuf.Parser<SegmentStateProto> getParserForType() {
1574          return PARSER;
1575        }
1576    
1577        private int bitField0_;
1578        // required uint64 startTxId = 1;
1579        public static final int STARTTXID_FIELD_NUMBER = 1;
1580        private long startTxId_;
1581        /**
1582         * <code>required uint64 startTxId = 1;</code>
1583         */
1584        public boolean hasStartTxId() {
1585          return ((bitField0_ & 0x00000001) == 0x00000001);
1586        }
1587        /**
1588         * <code>required uint64 startTxId = 1;</code>
1589         */
1590        public long getStartTxId() {
1591          return startTxId_;
1592        }
1593    
1594        // required uint64 endTxId = 2;
1595        public static final int ENDTXID_FIELD_NUMBER = 2;
1596        private long endTxId_;
1597        /**
1598         * <code>required uint64 endTxId = 2;</code>
1599         */
1600        public boolean hasEndTxId() {
1601          return ((bitField0_ & 0x00000002) == 0x00000002);
1602        }
1603        /**
1604         * <code>required uint64 endTxId = 2;</code>
1605         */
1606        public long getEndTxId() {
1607          return endTxId_;
1608        }
1609    
1610        // required bool isInProgress = 3;
1611        public static final int ISINPROGRESS_FIELD_NUMBER = 3;
1612        private boolean isInProgress_;
1613        /**
1614         * <code>required bool isInProgress = 3;</code>
1615         */
1616        public boolean hasIsInProgress() {
1617          return ((bitField0_ & 0x00000004) == 0x00000004);
1618        }
1619        /**
1620         * <code>required bool isInProgress = 3;</code>
1621         */
1622        public boolean getIsInProgress() {
1623          return isInProgress_;
1624        }
1625    
1626        private void initFields() {
1627          startTxId_ = 0L;
1628          endTxId_ = 0L;
1629          isInProgress_ = false;
1630        }
1631        private byte memoizedIsInitialized = -1;
1632        public final boolean isInitialized() {
1633          byte isInitialized = memoizedIsInitialized;
1634          if (isInitialized != -1) return isInitialized == 1;
1635    
1636          if (!hasStartTxId()) {
1637            memoizedIsInitialized = 0;
1638            return false;
1639          }
1640          if (!hasEndTxId()) {
1641            memoizedIsInitialized = 0;
1642            return false;
1643          }
1644          if (!hasIsInProgress()) {
1645            memoizedIsInitialized = 0;
1646            return false;
1647          }
1648          memoizedIsInitialized = 1;
1649          return true;
1650        }
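    // Editor's note (not protoc output): since all three fields are declared
    // `required`, isInitialized() only succeeds once each has been set, and
    // the result is memoized in memoizedIsInitialized. A hypothetical sketch
    // of how this surfaces in the builder API:
    //
    //   SegmentStateProto.Builder b = SegmentStateProto.newBuilder()
    //       .setStartTxId(1L)
    //       .setEndTxId(100L);
    //   b.buildPartial(); // ok: skips the required-field check
    //   b.build();        // throws UninitializedMessageException,
    //                     // since isInProgress was never set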
1651    
1652        public void writeTo(com.google.protobuf.CodedOutputStream output)
1653                            throws java.io.IOException {
1654          getSerializedSize();
1655          if (((bitField0_ & 0x00000001) == 0x00000001)) {
1656            output.writeUInt64(1, startTxId_);
1657          }
1658          if (((bitField0_ & 0x00000002) == 0x00000002)) {
1659            output.writeUInt64(2, endTxId_);
1660          }
1661          if (((bitField0_ & 0x00000004) == 0x00000004)) {
1662            output.writeBool(3, isInProgress_);
1663          }
1664          getUnknownFields().writeTo(output);
1665        }
1666    
1667        private int memoizedSerializedSize = -1;
1668        public int getSerializedSize() {
1669          int size = memoizedSerializedSize;
1670          if (size != -1) return size;
1671    
1672          size = 0;
1673          if (((bitField0_ & 0x00000001) == 0x00000001)) {
1674            size += com.google.protobuf.CodedOutputStream
1675              .computeUInt64Size(1, startTxId_);
1676          }
1677          if (((bitField0_ & 0x00000002) == 0x00000002)) {
1678            size += com.google.protobuf.CodedOutputStream
1679              .computeUInt64Size(2, endTxId_);
1680          }
1681          if (((bitField0_ & 0x00000004) == 0x00000004)) {
1682            size += com.google.protobuf.CodedOutputStream
1683              .computeBoolSize(3, isInProgress_);
1684          }
1685          size += getUnknownFields().getSerializedSize();
1686          memoizedSerializedSize = size;
1687          return size;
1688        }
1689    
1690        private static final long serialVersionUID = 0L;
1691        @java.lang.Override
1692        protected java.lang.Object writeReplace()
1693            throws java.io.ObjectStreamException {
1694          return super.writeReplace();
1695        }
1696    
1697        @java.lang.Override
1698        public boolean equals(final java.lang.Object obj) {
1699          if (obj == this) {
1700            return true;
1701          }
1702          if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto)) {
1703            return super.equals(obj);
1704          }
1705          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto) obj;
1706    
1707          boolean result = true;
1708          result = result && (hasStartTxId() == other.hasStartTxId());
1709          if (hasStartTxId()) {
1710            result = result && (getStartTxId()
1711                == other.getStartTxId());
1712          }
1713          result = result && (hasEndTxId() == other.hasEndTxId());
1714          if (hasEndTxId()) {
1715            result = result && (getEndTxId()
1716                == other.getEndTxId());
1717          }
1718          result = result && (hasIsInProgress() == other.hasIsInProgress());
1719          if (hasIsInProgress()) {
1720            result = result && (getIsInProgress()
1721                == other.getIsInProgress());
1722          }
1723          result = result &&
1724              getUnknownFields().equals(other.getUnknownFields());
1725          return result;
1726        }
1727    
1728        private int memoizedHashCode = 0;
1729        @java.lang.Override
1730        public int hashCode() {
1731          if (memoizedHashCode != 0) {
1732            return memoizedHashCode;
1733          }
1734          int hash = 41;
1735          hash = (19 * hash) + getDescriptorForType().hashCode();
1736          if (hasStartTxId()) {
1737            hash = (37 * hash) + STARTTXID_FIELD_NUMBER;
1738            hash = (53 * hash) + hashLong(getStartTxId());
1739          }
1740          if (hasEndTxId()) {
1741            hash = (37 * hash) + ENDTXID_FIELD_NUMBER;
1742            hash = (53 * hash) + hashLong(getEndTxId());
1743          }
1744          if (hasIsInProgress()) {
1745            hash = (37 * hash) + ISINPROGRESS_FIELD_NUMBER;
1746            hash = (53 * hash) + hashBoolean(getIsInProgress());
1747          }
1748          hash = (29 * hash) + getUnknownFields().hashCode();
1749          memoizedHashCode = hash;
1750          return hash;
1751        }
1752    
1753        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto parseFrom(
1754            com.google.protobuf.ByteString data)
1755            throws com.google.protobuf.InvalidProtocolBufferException {
1756          return PARSER.parseFrom(data);
1757        }
1758        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto parseFrom(
1759            com.google.protobuf.ByteString data,
1760            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
1761            throws com.google.protobuf.InvalidProtocolBufferException {
1762          return PARSER.parseFrom(data, extensionRegistry);
1763        }
1764        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto parseFrom(byte[] data)
1765            throws com.google.protobuf.InvalidProtocolBufferException {
1766          return PARSER.parseFrom(data);
1767        }
1768        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto parseFrom(
1769            byte[] data,
1770            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
1771            throws com.google.protobuf.InvalidProtocolBufferException {
1772          return PARSER.parseFrom(data, extensionRegistry);
1773        }
1774        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto parseFrom(java.io.InputStream input)
1775            throws java.io.IOException {
1776          return PARSER.parseFrom(input);
1777        }
1778        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto parseFrom(
1779            java.io.InputStream input,
1780            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
1781            throws java.io.IOException {
1782          return PARSER.parseFrom(input, extensionRegistry);
1783        }
1784        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto parseDelimitedFrom(java.io.InputStream input)
1785            throws java.io.IOException {
1786          return PARSER.parseDelimitedFrom(input);
1787        }
1788        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto parseDelimitedFrom(
1789            java.io.InputStream input,
1790            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
1791            throws java.io.IOException {
1792          return PARSER.parseDelimitedFrom(input, extensionRegistry);
1793        }
1794        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto parseFrom(
1795            com.google.protobuf.CodedInputStream input)
1796            throws java.io.IOException {
1797          return PARSER.parseFrom(input);
1798        }
1799        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto parseFrom(
1800            com.google.protobuf.CodedInputStream input,
1801            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
1802            throws java.io.IOException {
1803          return PARSER.parseFrom(input, extensionRegistry);
1804        }
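    // Editor's note (not protoc output): a hedged sketch of the parse entry
    // points above. Plain parseFrom() expects the stream to contain exactly
    // one message, while parseDelimitedFrom() first reads a varint length
    // prefix, so several messages can share one stream:
    //
    //   // assuming `in` is a java.io.InputStream positioned at a message
    //   // that was written with writeDelimitedTo()
    //   SegmentStateProto seg = SegmentStateProto.parseDelimitedFrom(in);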
1805    
1806        public static Builder newBuilder() { return Builder.create(); }
1807        public Builder newBuilderForType() { return newBuilder(); }
1808        public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto prototype) {
1809          return newBuilder().mergeFrom(prototype);
1810        }
1811        public Builder toBuilder() { return newBuilder(this); }
1812    
1813        @java.lang.Override
1814        protected Builder newBuilderForType(
1815            com.google.protobuf.GeneratedMessage.BuilderParent parent) {
1816          Builder builder = new Builder(parent);
1817          return builder;
1818        }
1819        /**
1820         * Protobuf type {@code hadoop.hdfs.qjournal.SegmentStateProto}
1821         */
1822        public static final class Builder extends
1823            com.google.protobuf.GeneratedMessage.Builder<Builder>
1824           implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder {
1825          public static final com.google.protobuf.Descriptors.Descriptor
1826              getDescriptor() {
1827            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_SegmentStateProto_descriptor;
1828          }
1829    
1830          protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
1831              internalGetFieldAccessorTable() {
1832            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_SegmentStateProto_fieldAccessorTable
1833                .ensureFieldAccessorsInitialized(
1834                    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder.class);
1835          }
1836    
1837          // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.newBuilder()
1838          private Builder() {
1839            maybeForceBuilderInitialization();
1840          }
1841    
1842          private Builder(
1843              com.google.protobuf.GeneratedMessage.BuilderParent parent) {
1844            super(parent);
1845            maybeForceBuilderInitialization();
1846          }
1847          private void maybeForceBuilderInitialization() {
1848            if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
1849            }
1850          }
1851          private static Builder create() {
1852            return new Builder();
1853          }
1854    
1855          public Builder clear() {
1856            super.clear();
1857            startTxId_ = 0L;
1858            bitField0_ = (bitField0_ & ~0x00000001);
1859            endTxId_ = 0L;
1860            bitField0_ = (bitField0_ & ~0x00000002);
1861            isInProgress_ = false;
1862            bitField0_ = (bitField0_ & ~0x00000004);
1863            return this;
1864          }
1865    
1866          public Builder clone() {
1867            return create().mergeFrom(buildPartial());
1868          }
1869    
1870          public com.google.protobuf.Descriptors.Descriptor
1871              getDescriptorForType() {
1872            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_SegmentStateProto_descriptor;
1873          }
1874    
1875          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto getDefaultInstanceForType() {
1876            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance();
1877          }
1878    
1879          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto build() {
1880            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto result = buildPartial();
1881            if (!result.isInitialized()) {
1882              throw newUninitializedMessageException(result);
1883            }
1884            return result;
1885          }
1886    
1887          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto buildPartial() {
1888            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto(this);
1889            int from_bitField0_ = bitField0_;
1890            int to_bitField0_ = 0;
1891            if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
1892              to_bitField0_ |= 0x00000001;
1893            }
1894            result.startTxId_ = startTxId_;
1895            if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
1896              to_bitField0_ |= 0x00000002;
1897            }
1898            result.endTxId_ = endTxId_;
1899            if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
1900              to_bitField0_ |= 0x00000004;
1901            }
1902            result.isInProgress_ = isInProgress_;
1903            result.bitField0_ = to_bitField0_;
1904            onBuilt();
1905            return result;
1906          }
1907    
1908          public Builder mergeFrom(com.google.protobuf.Message other) {
1909            if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto) {
1910              return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto)other);
1911            } else {
1912              super.mergeFrom(other);
1913              return this;
1914            }
1915          }
1916    
1917          public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto other) {
1918            if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance()) return this;
1919            if (other.hasStartTxId()) {
1920              setStartTxId(other.getStartTxId());
1921            }
1922            if (other.hasEndTxId()) {
1923              setEndTxId(other.getEndTxId());
1924            }
1925            if (other.hasIsInProgress()) {
1926              setIsInProgress(other.getIsInProgress());
1927            }
1928            this.mergeUnknownFields(other.getUnknownFields());
1929            return this;
1930          }
1931    
1932          public final boolean isInitialized() {
1933            if (!hasStartTxId()) {
1935              return false;
1936            }
1937            if (!hasEndTxId()) {
1939              return false;
1940            }
1941            if (!hasIsInProgress()) {
1943              return false;
1944            }
1945            return true;
1946          }
1947    
1948          public Builder mergeFrom(
1949              com.google.protobuf.CodedInputStream input,
1950              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
1951              throws java.io.IOException {
1952            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto parsedMessage = null;
1953            try {
1954              parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
1955            } catch (com.google.protobuf.InvalidProtocolBufferException e) {
1956              parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto) e.getUnfinishedMessage();
1957              throw e;
1958            } finally {
1959              if (parsedMessage != null) {
1960                mergeFrom(parsedMessage);
1961              }
1962            }
1963            return this;
1964          }
1965          private int bitField0_;
1966    
1967          // required uint64 startTxId = 1;
1968          private long startTxId_ ;
1969          /**
1970           * <code>required uint64 startTxId = 1;</code>
1971           */
1972          public boolean hasStartTxId() {
1973            return ((bitField0_ & 0x00000001) == 0x00000001);
1974          }
1975          /**
1976           * <code>required uint64 startTxId = 1;</code>
1977           */
1978          public long getStartTxId() {
1979            return startTxId_;
1980          }
1981          /**
1982           * <code>required uint64 startTxId = 1;</code>
1983           */
1984          public Builder setStartTxId(long value) {
1985            bitField0_ |= 0x00000001;
1986            startTxId_ = value;
1987            onChanged();
1988            return this;
1989          }
1990          /**
1991           * <code>required uint64 startTxId = 1;</code>
1992           */
1993          public Builder clearStartTxId() {
1994            bitField0_ = (bitField0_ & ~0x00000001);
1995            startTxId_ = 0L;
1996            onChanged();
1997            return this;
1998          }
1999    
2000          // required uint64 endTxId = 2;
2001          private long endTxId_ ;
2002          /**
2003           * <code>required uint64 endTxId = 2;</code>
2004           */
2005          public boolean hasEndTxId() {
2006            return ((bitField0_ & 0x00000002) == 0x00000002);
2007          }
2008          /**
2009           * <code>required uint64 endTxId = 2;</code>
2010           */
2011          public long getEndTxId() {
2012            return endTxId_;
2013          }
2014          /**
2015           * <code>required uint64 endTxId = 2;</code>
2016           */
2017          public Builder setEndTxId(long value) {
2018            bitField0_ |= 0x00000002;
2019            endTxId_ = value;
2020            onChanged();
2021            return this;
2022          }
2023          /**
2024           * <code>required uint64 endTxId = 2;</code>
2025           */
2026          public Builder clearEndTxId() {
2027            bitField0_ = (bitField0_ & ~0x00000002);
2028            endTxId_ = 0L;
2029            onChanged();
2030            return this;
2031          }
2032    
2033          // required bool isInProgress = 3;
2034          private boolean isInProgress_ ;
2035          /**
2036           * <code>required bool isInProgress = 3;</code>
2037           */
2038          public boolean hasIsInProgress() {
2039            return ((bitField0_ & 0x00000004) == 0x00000004);
2040          }
2041          /**
2042           * <code>required bool isInProgress = 3;</code>
2043           */
2044          public boolean getIsInProgress() {
2045            return isInProgress_;
2046          }
2047          /**
2048           * <code>required bool isInProgress = 3;</code>
2049           */
2050          public Builder setIsInProgress(boolean value) {
2051            bitField0_ |= 0x00000004;
2052            isInProgress_ = value;
2053            onChanged();
2054            return this;
2055          }
2056          /**
2057           * <code>required bool isInProgress = 3;</code>
2058           */
2059          public Builder clearIsInProgress() {
2060            bitField0_ = (bitField0_ & ~0x00000004);
2061            isInProgress_ = false;
2062            onChanged();
2063            return this;
2064          }
2065    
2066          // @@protoc_insertion_point(builder_scope:hadoop.hdfs.qjournal.SegmentStateProto)
2067        }
2068    
2069        static {
2070          defaultInstance = new SegmentStateProto(true);
2071          defaultInstance.initFields();
2072        }
2073    
2074        // @@protoc_insertion_point(class_scope:hadoop.hdfs.qjournal.SegmentStateProto)
2075      }
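  // Editor's note (not protoc output): an illustrative round trip through the
  // generated SegmentStateProto API; the txid values are made up.
  //
  //   SegmentStateProto seg = SegmentStateProto.newBuilder()
  //       .setStartTxId(1L)
  //       .setEndTxId(100L)
  //       .setIsInProgress(false)
  //       .build();
  //   com.google.protobuf.ByteString bytes = seg.toByteString();
  //   SegmentStateProto copy = SegmentStateProto.parseFrom(bytes);
  //   assert copy.equals(seg) && copy.hashCode() == seg.hashCode();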
2076    
2077      public interface PersistedRecoveryPaxosDataOrBuilder
2078          extends com.google.protobuf.MessageOrBuilder {
2079    
2080        // required .hadoop.hdfs.qjournal.SegmentStateProto segmentState = 1;
2081        /**
2082         * <code>required .hadoop.hdfs.qjournal.SegmentStateProto segmentState = 1;</code>
2083         */
2084        boolean hasSegmentState();
2085        /**
2086         * <code>required .hadoop.hdfs.qjournal.SegmentStateProto segmentState = 1;</code>
2087         */
2088        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto getSegmentState();
2089        /**
2090         * <code>required .hadoop.hdfs.qjournal.SegmentStateProto segmentState = 1;</code>
2091         */
2092        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder getSegmentStateOrBuilder();
2093    
2094        // required uint64 acceptedInEpoch = 2;
2095        /**
2096         * <code>required uint64 acceptedInEpoch = 2;</code>
2097         */
2098        boolean hasAcceptedInEpoch();
2099        /**
2100         * <code>required uint64 acceptedInEpoch = 2;</code>
2101         */
2102        long getAcceptedInEpoch();
2103      }
2104      /**
2105       * Protobuf type {@code hadoop.hdfs.qjournal.PersistedRecoveryPaxosData}
2106       *
2107       * <pre>
2108       **
2109       * The storage format used on local disk for previously
2110       * accepted decisions.
2111       * </pre>
2112       */
2113      public static final class PersistedRecoveryPaxosData extends
2114          com.google.protobuf.GeneratedMessage
2115          implements PersistedRecoveryPaxosDataOrBuilder {
2116        // Use PersistedRecoveryPaxosData.newBuilder() to construct.
2117        private PersistedRecoveryPaxosData(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
2118          super(builder);
2119          this.unknownFields = builder.getUnknownFields();
2120        }
2121        private PersistedRecoveryPaxosData(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
2122    
2123        private static final PersistedRecoveryPaxosData defaultInstance;
2124        public static PersistedRecoveryPaxosData getDefaultInstance() {
2125          return defaultInstance;
2126        }
2127    
2128        public PersistedRecoveryPaxosData getDefaultInstanceForType() {
2129          return defaultInstance;
2130        }
2131    
2132        private final com.google.protobuf.UnknownFieldSet unknownFields;
2133        @java.lang.Override
2134        public final com.google.protobuf.UnknownFieldSet
2135            getUnknownFields() {
2136          return this.unknownFields;
2137        }
2138        private PersistedRecoveryPaxosData(
2139            com.google.protobuf.CodedInputStream input,
2140            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
2141            throws com.google.protobuf.InvalidProtocolBufferException {
2142          initFields();
2143          int mutable_bitField0_ = 0;
2144          com.google.protobuf.UnknownFieldSet.Builder unknownFields =
2145              com.google.protobuf.UnknownFieldSet.newBuilder();
2146          try {
2147            boolean done = false;
2148            while (!done) {
2149              int tag = input.readTag();
2150              switch (tag) {
2151                case 0:
2152                  done = true;
2153                  break;
2154                default: {
2155                  if (!parseUnknownField(input, unknownFields,
2156                                         extensionRegistry, tag)) {
2157                    done = true;
2158                  }
2159                  break;
2160                }
2161                case 10: {
2162                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder subBuilder = null;
2163                  if (((bitField0_ & 0x00000001) == 0x00000001)) {
2164                    subBuilder = segmentState_.toBuilder();
2165                  }
2166                  segmentState_ = input.readMessage(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.PARSER, extensionRegistry);
2167                  if (subBuilder != null) {
2168                    subBuilder.mergeFrom(segmentState_);
2169                    segmentState_ = subBuilder.buildPartial();
2170                  }
2171                  bitField0_ |= 0x00000001;
2172                  break;
2173                }
2174                case 16: {
2175                  bitField0_ |= 0x00000002;
2176                  acceptedInEpoch_ = input.readUInt64();
2177                  break;
2178                }
2179              }
2180            }
2181          } catch (com.google.protobuf.InvalidProtocolBufferException e) {
2182            throw e.setUnfinishedMessage(this);
2183          } catch (java.io.IOException e) {
2184            throw new com.google.protobuf.InvalidProtocolBufferException(
2185                e.getMessage()).setUnfinishedMessage(this);
2186          } finally {
2187            this.unknownFields = unknownFields.build();
2188            makeExtensionsImmutable();
2189          }
2190        }
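    // Editor's note (not protoc output): in case 10 above the message-typed
    // segmentState field is parsed. If the field was already present, the new
    // bytes are merged into the existing value through a temporary
    // sub-builder rather than replacing it, matching protobuf's rule that
    // repeated occurrences of a non-repeated message field merge together.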
2191        public static final com.google.protobuf.Descriptors.Descriptor
2192            getDescriptor() {
2193          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_PersistedRecoveryPaxosData_descriptor;
2194        }
2195    
2196        protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
2197            internalGetFieldAccessorTable() {
2198          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_PersistedRecoveryPaxosData_fieldAccessorTable
2199              .ensureFieldAccessorsInitialized(
2200                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData.Builder.class);
2201        }
2202    
2203        public static com.google.protobuf.Parser<PersistedRecoveryPaxosData> PARSER =
2204            new com.google.protobuf.AbstractParser<PersistedRecoveryPaxosData>() {
2205          public PersistedRecoveryPaxosData parsePartialFrom(
2206              com.google.protobuf.CodedInputStream input,
2207              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
2208              throws com.google.protobuf.InvalidProtocolBufferException {
2209            return new PersistedRecoveryPaxosData(input, extensionRegistry);
2210          }
2211        };
2212    
2213        @java.lang.Override
2214        public com.google.protobuf.Parser<PersistedRecoveryPaxosData> getParserForType() {
2215          return PARSER;
2216        }
2217    
2218        private int bitField0_;
2219        // required .hadoop.hdfs.qjournal.SegmentStateProto segmentState = 1;
2220        public static final int SEGMENTSTATE_FIELD_NUMBER = 1;
2221        private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto segmentState_;
2222        /**
2223         * <code>required .hadoop.hdfs.qjournal.SegmentStateProto segmentState = 1;</code>
2224         */
2225        public boolean hasSegmentState() {
2226          return ((bitField0_ & 0x00000001) == 0x00000001);
2227        }
2228        /**
2229         * <code>required .hadoop.hdfs.qjournal.SegmentStateProto segmentState = 1;</code>
2230         */
2231        public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto getSegmentState() {
2232          return segmentState_;
2233        }
2234        /**
2235         * <code>required .hadoop.hdfs.qjournal.SegmentStateProto segmentState = 1;</code>
2236         */
2237        public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder getSegmentStateOrBuilder() {
2238          return segmentState_;
2239        }
2240    
2241        // required uint64 acceptedInEpoch = 2;
2242        public static final int ACCEPTEDINEPOCH_FIELD_NUMBER = 2;
2243        private long acceptedInEpoch_;
2244        /**
2245         * <code>required uint64 acceptedInEpoch = 2;</code>
2246         */
2247        public boolean hasAcceptedInEpoch() {
2248          return ((bitField0_ & 0x00000002) == 0x00000002);
2249        }
2250        /**
2251         * <code>required uint64 acceptedInEpoch = 2;</code>
2252         */
2253        public long getAcceptedInEpoch() {
2254          return acceptedInEpoch_;
2255        }
2256    
2257        private void initFields() {
2258          segmentState_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance();
2259          acceptedInEpoch_ = 0L;
2260        }
2261        private byte memoizedIsInitialized = -1;
2262        public final boolean isInitialized() {
2263          byte isInitialized = memoizedIsInitialized;
2264          if (isInitialized != -1) return isInitialized == 1;
2265    
2266          if (!hasSegmentState()) {
2267            memoizedIsInitialized = 0;
2268            return false;
2269          }
2270          if (!hasAcceptedInEpoch()) {
2271            memoizedIsInitialized = 0;
2272            return false;
2273          }
2274          if (!getSegmentState().isInitialized()) {
2275            memoizedIsInitialized = 0;
2276            return false;
2277          }
2278          memoizedIsInitialized = 1;
2279          return true;
2280        }
2281    
2282        public void writeTo(com.google.protobuf.CodedOutputStream output)
2283                            throws java.io.IOException {
2284          getSerializedSize();
2285          if (((bitField0_ & 0x00000001) == 0x00000001)) {
2286            output.writeMessage(1, segmentState_);
2287          }
2288          if (((bitField0_ & 0x00000002) == 0x00000002)) {
2289            output.writeUInt64(2, acceptedInEpoch_);
2290          }
2291          getUnknownFields().writeTo(output);
2292        }
2293    
2294        private int memoizedSerializedSize = -1;
2295        public int getSerializedSize() {
2296          int size = memoizedSerializedSize;
2297          if (size != -1) return size;
2298    
2299          size = 0;
2300          if (((bitField0_ & 0x00000001) == 0x00000001)) {
2301            size += com.google.protobuf.CodedOutputStream
2302              .computeMessageSize(1, segmentState_);
2303          }
2304          if (((bitField0_ & 0x00000002) == 0x00000002)) {
2305            size += com.google.protobuf.CodedOutputStream
2306              .computeUInt64Size(2, acceptedInEpoch_);
2307          }
2308          size += getUnknownFields().getSerializedSize();
2309          memoizedSerializedSize = size;
2310          return size;
2311        }
2312    
2313        private static final long serialVersionUID = 0L;
2314        @java.lang.Override
2315        protected java.lang.Object writeReplace()
2316            throws java.io.ObjectStreamException {
2317          return super.writeReplace();
2318        }
2319    
2320        @java.lang.Override
2321        public boolean equals(final java.lang.Object obj) {
2322          if (obj == this) {
2323            return true;
2324          }
2325          if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData)) {
2326            return super.equals(obj);
2327          }
2328          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData) obj;
2329    
2330          boolean result = true;
2331          result = result && (hasSegmentState() == other.hasSegmentState());
2332          if (hasSegmentState()) {
2333            result = result && getSegmentState()
2334                .equals(other.getSegmentState());
2335          }
2336          result = result && (hasAcceptedInEpoch() == other.hasAcceptedInEpoch());
2337          if (hasAcceptedInEpoch()) {
2338            result = result && (getAcceptedInEpoch()
2339                == other.getAcceptedInEpoch());
2340          }
2341          result = result &&
2342              getUnknownFields().equals(other.getUnknownFields());
2343          return result;
2344        }
2345    
2346        private int memoizedHashCode = 0;
2347        @java.lang.Override
2348        public int hashCode() {
2349          if (memoizedHashCode != 0) {
2350            return memoizedHashCode;
2351          }
2352          int hash = 41;
2353          hash = (19 * hash) + getDescriptorForType().hashCode();
2354          if (hasSegmentState()) {
2355            hash = (37 * hash) + SEGMENTSTATE_FIELD_NUMBER;
2356            hash = (53 * hash) + getSegmentState().hashCode();
2357          }
2358          if (hasAcceptedInEpoch()) {
2359            hash = (37 * hash) + ACCEPTEDINEPOCH_FIELD_NUMBER;
2360            hash = (53 * hash) + hashLong(getAcceptedInEpoch());
2361          }
2362          hash = (29 * hash) + getUnknownFields().hashCode();
2363          memoizedHashCode = hash;
2364          return hash;
2365        }
2366    
2367        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData parseFrom(
2368            com.google.protobuf.ByteString data)
2369            throws com.google.protobuf.InvalidProtocolBufferException {
2370          return PARSER.parseFrom(data);
2371        }
2372        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData parseFrom(
2373            com.google.protobuf.ByteString data,
2374            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
2375            throws com.google.protobuf.InvalidProtocolBufferException {
2376          return PARSER.parseFrom(data, extensionRegistry);
2377        }
2378        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData parseFrom(byte[] data)
2379            throws com.google.protobuf.InvalidProtocolBufferException {
2380          return PARSER.parseFrom(data);
2381        }
2382        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData parseFrom(
2383            byte[] data,
2384            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
2385            throws com.google.protobuf.InvalidProtocolBufferException {
2386          return PARSER.parseFrom(data, extensionRegistry);
2387        }
2388        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData parseFrom(java.io.InputStream input)
2389            throws java.io.IOException {
2390          return PARSER.parseFrom(input);
2391        }
2392        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData parseFrom(
2393            java.io.InputStream input,
2394            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
2395            throws java.io.IOException {
2396          return PARSER.parseFrom(input, extensionRegistry);
2397        }
2398        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData parseDelimitedFrom(java.io.InputStream input)
2399            throws java.io.IOException {
2400          return PARSER.parseDelimitedFrom(input);
2401        }
2402        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData parseDelimitedFrom(
2403            java.io.InputStream input,
2404            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
2405            throws java.io.IOException {
2406          return PARSER.parseDelimitedFrom(input, extensionRegistry);
2407        }
2408        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData parseFrom(
2409            com.google.protobuf.CodedInputStream input)
2410            throws java.io.IOException {
2411          return PARSER.parseFrom(input);
2412        }
2413        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData parseFrom(
2414            com.google.protobuf.CodedInputStream input,
2415            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
2416            throws java.io.IOException {
2417          return PARSER.parseFrom(input, extensionRegistry);
2418        }
2419    
2420        public static Builder newBuilder() { return Builder.create(); }
2421        public Builder newBuilderForType() { return newBuilder(); }
2422        public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData prototype) {
2423          return newBuilder().mergeFrom(prototype);
2424        }
2425        public Builder toBuilder() { return newBuilder(this); }
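    // Editor's note (not protoc output): toBuilder() above copies this
    // message into a fresh builder, so a persisted record can be amended
    // without mutating the original. A hypothetical re-acceptance in a later
    // epoch might look like:
    //
    //   PersistedRecoveryPaxosData updated = data.toBuilder()
    //       .setAcceptedInEpoch(data.getAcceptedInEpoch() + 1)
    //       .build();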
2426    
2427        @java.lang.Override
2428        protected Builder newBuilderForType(
2429            com.google.protobuf.GeneratedMessage.BuilderParent parent) {
2430          Builder builder = new Builder(parent);
2431          return builder;
2432        }
2433        /**
2434         * Protobuf type {@code hadoop.hdfs.qjournal.PersistedRecoveryPaxosData}
2435         *
2436         * <pre>
2437         **
2438         * The storage format used on local disk for previously
2439         * accepted decisions.
2440         * </pre>
2441         */
2442        public static final class Builder extends
2443            com.google.protobuf.GeneratedMessage.Builder<Builder>
2444           implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosDataOrBuilder {
2445          public static final com.google.protobuf.Descriptors.Descriptor
2446              getDescriptor() {
2447            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_PersistedRecoveryPaxosData_descriptor;
2448          }
2449    
2450          protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
2451              internalGetFieldAccessorTable() {
2452            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_PersistedRecoveryPaxosData_fieldAccessorTable
2453                .ensureFieldAccessorsInitialized(
2454                    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData.Builder.class);
2455          }
2456    
2457          // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData.newBuilder()
2458          private Builder() {
2459            maybeForceBuilderInitialization();
2460          }
2461    
2462          private Builder(
2463              com.google.protobuf.GeneratedMessage.BuilderParent parent) {
2464            super(parent);
2465            maybeForceBuilderInitialization();
2466          }
2467          private void maybeForceBuilderInitialization() {
2468            if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
2469              getSegmentStateFieldBuilder();
2470            }
2471          }
2472          private static Builder create() {
2473            return new Builder();
2474          }
2475    
2476          public Builder clear() {
2477            super.clear();
2478            if (segmentStateBuilder_ == null) {
2479              segmentState_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance();
2480            } else {
2481              segmentStateBuilder_.clear();
2482            }
2483            bitField0_ = (bitField0_ & ~0x00000001);
2484            acceptedInEpoch_ = 0L;
2485            bitField0_ = (bitField0_ & ~0x00000002);
2486            return this;
2487          }
2488    
2489          public Builder clone() {
2490            return create().mergeFrom(buildPartial());
2491          }
2492    
2493          public com.google.protobuf.Descriptors.Descriptor
2494              getDescriptorForType() {
2495            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_PersistedRecoveryPaxosData_descriptor;
2496          }
2497    
2498          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData getDefaultInstanceForType() {
2499            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData.getDefaultInstance();
2500          }
2501    
2502          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData build() {
2503            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData result = buildPartial();
2504            if (!result.isInitialized()) {
2505              throw newUninitializedMessageException(result);
2506            }
2507            return result;
2508          }
2509    
2510          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData buildPartial() {
2511            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData(this);
2512            int from_bitField0_ = bitField0_;
2513            int to_bitField0_ = 0;
2514            if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
2515              to_bitField0_ |= 0x00000001;
2516            }
2517            if (segmentStateBuilder_ == null) {
2518              result.segmentState_ = segmentState_;
2519            } else {
2520              result.segmentState_ = segmentStateBuilder_.build();
2521            }
2522            if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
2523              to_bitField0_ |= 0x00000002;
2524            }
2525            result.acceptedInEpoch_ = acceptedInEpoch_;
2526            result.bitField0_ = to_bitField0_;
2527            onBuilt();
2528            return result;
2529          }
2530    
2531          public Builder mergeFrom(com.google.protobuf.Message other) {
2532            if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData) {
2533              return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData)other);
2534            } else {
2535              super.mergeFrom(other);
2536              return this;
2537            }
2538          }
2539    
2540          public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData other) {
2541            if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData.getDefaultInstance()) return this;
2542            if (other.hasSegmentState()) {
2543              mergeSegmentState(other.getSegmentState());
2544            }
2545            if (other.hasAcceptedInEpoch()) {
2546              setAcceptedInEpoch(other.getAcceptedInEpoch());
2547            }
2548            this.mergeUnknownFields(other.getUnknownFields());
2549            return this;
2550          }
2551    
2552          public final boolean isInitialized() {
2553            if (!hasSegmentState()) {
2555              return false;
2556            }
2557            if (!hasAcceptedInEpoch()) {
2559              return false;
2560            }
2561            if (!getSegmentState().isInitialized()) {
2563              return false;
2564            }
2565            return true;
2566          }
2567    
2568          public Builder mergeFrom(
2569              com.google.protobuf.CodedInputStream input,
2570              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
2571              throws java.io.IOException {
2572            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData parsedMessage = null;
2573            try {
2574              parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
2575            } catch (com.google.protobuf.InvalidProtocolBufferException e) {
2576              parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData) e.getUnfinishedMessage();
2577              throw e;
2578            } finally {
2579              if (parsedMessage != null) {
2580                mergeFrom(parsedMessage);
2581              }
2582            }
2583            return this;
2584          }
2585          private int bitField0_;
2586    
2587          // required .hadoop.hdfs.qjournal.SegmentStateProto segmentState = 1;
2588          private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto segmentState_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance();
2589          private com.google.protobuf.SingleFieldBuilder<
2590              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder> segmentStateBuilder_;
2591          /**
2592           * <code>required .hadoop.hdfs.qjournal.SegmentStateProto segmentState = 1;</code>
2593           */
2594          public boolean hasSegmentState() {
2595            return ((bitField0_ & 0x00000001) == 0x00000001);
2596          }
2597          /**
2598           * <code>required .hadoop.hdfs.qjournal.SegmentStateProto segmentState = 1;</code>
2599           */
2600          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto getSegmentState() {
2601            if (segmentStateBuilder_ == null) {
2602              return segmentState_;
2603            } else {
2604              return segmentStateBuilder_.getMessage();
2605            }
2606          }
2607          /**
2608           * <code>required .hadoop.hdfs.qjournal.SegmentStateProto segmentState = 1;</code>
2609           */
2610          public Builder setSegmentState(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto value) {
2611            if (segmentStateBuilder_ == null) {
2612              if (value == null) {
2613                throw new NullPointerException();
2614              }
2615              segmentState_ = value;
2616              onChanged();
2617            } else {
2618              segmentStateBuilder_.setMessage(value);
2619            }
2620            bitField0_ |= 0x00000001;
2621            return this;
2622          }
2623          /**
2624           * <code>required .hadoop.hdfs.qjournal.SegmentStateProto segmentState = 1;</code>
2625           */
2626          public Builder setSegmentState(
2627              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder builderForValue) {
2628            if (segmentStateBuilder_ == null) {
2629              segmentState_ = builderForValue.build();
2630              onChanged();
2631            } else {
2632              segmentStateBuilder_.setMessage(builderForValue.build());
2633            }
2634            bitField0_ |= 0x00000001;
2635            return this;
2636          }
2637          /**
2638           * <code>required .hadoop.hdfs.qjournal.SegmentStateProto segmentState = 1;</code>
2639           */
2640          public Builder mergeSegmentState(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto value) {
2641            if (segmentStateBuilder_ == null) {
2642              if (((bitField0_ & 0x00000001) == 0x00000001) &&
2643                  segmentState_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance()) {
2644                segmentState_ =
2645                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.newBuilder(segmentState_).mergeFrom(value).buildPartial();
2646              } else {
2647                segmentState_ = value;
2648              }
2649              onChanged();
2650            } else {
2651              segmentStateBuilder_.mergeFrom(value);
2652            }
2653            bitField0_ |= 0x00000001;
2654            return this;
2655          }
2656          /**
2657           * <code>required .hadoop.hdfs.qjournal.SegmentStateProto segmentState = 1;</code>
2658           */
2659          public Builder clearSegmentState() {
2660            if (segmentStateBuilder_ == null) {
2661              segmentState_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance();
2662              onChanged();
2663            } else {
2664              segmentStateBuilder_.clear();
2665            }
2666            bitField0_ = (bitField0_ & ~0x00000001);
2667            return this;
2668          }
2669          /**
2670           * <code>required .hadoop.hdfs.qjournal.SegmentStateProto segmentState = 1;</code>
2671           */
2672          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder getSegmentStateBuilder() {
2673            bitField0_ |= 0x00000001;
2674            onChanged();
2675            return getSegmentStateFieldBuilder().getBuilder();
2676          }
2677          /**
2678           * <code>required .hadoop.hdfs.qjournal.SegmentStateProto segmentState = 1;</code>
2679           */
2680          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder getSegmentStateOrBuilder() {
2681            if (segmentStateBuilder_ != null) {
2682              return segmentStateBuilder_.getMessageOrBuilder();
2683            } else {
2684              return segmentState_;
2685            }
2686          }
2687          /**
2688           * <code>required .hadoop.hdfs.qjournal.SegmentStateProto segmentState = 1;</code>
2689           */
2690          private com.google.protobuf.SingleFieldBuilder<
2691              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder> 
2692              getSegmentStateFieldBuilder() {
2693            if (segmentStateBuilder_ == null) {
2694              segmentStateBuilder_ = new com.google.protobuf.SingleFieldBuilder<
2695                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder>(
2696                      segmentState_,
2697                      getParentForChildren(),
2698                      isClean());
2699              segmentState_ = null;
2700            }
2701            return segmentStateBuilder_;
2702          }
2703    
2704          // required uint64 acceptedInEpoch = 2;
2705          private long acceptedInEpoch_ ;
2706          /**
2707           * <code>required uint64 acceptedInEpoch = 2;</code>
2708           */
2709          public boolean hasAcceptedInEpoch() {
2710            return ((bitField0_ & 0x00000002) == 0x00000002);
2711          }
2712          /**
2713           * <code>required uint64 acceptedInEpoch = 2;</code>
2714           */
2715          public long getAcceptedInEpoch() {
2716            return acceptedInEpoch_;
2717          }
2718          /**
2719           * <code>required uint64 acceptedInEpoch = 2;</code>
2720           */
2721          public Builder setAcceptedInEpoch(long value) {
2722            bitField0_ |= 0x00000002;
2723            acceptedInEpoch_ = value;
2724            onChanged();
2725            return this;
2726          }
2727          /**
2728           * <code>required uint64 acceptedInEpoch = 2;</code>
2729           */
2730          public Builder clearAcceptedInEpoch() {
2731            bitField0_ = (bitField0_ & ~0x00000002);
2732            acceptedInEpoch_ = 0L;
2733            onChanged();
2734            return this;
2735          }
2736    
2737          // @@protoc_insertion_point(builder_scope:hadoop.hdfs.qjournal.PersistedRecoveryPaxosData)
2738        }
2739    
2740        static {
2741          defaultInstance = new PersistedRecoveryPaxosData(true);
2742          defaultInstance.initFields();
2743        }
2744    
2745        // @@protoc_insertion_point(class_scope:hadoop.hdfs.qjournal.PersistedRecoveryPaxosData)
2746      }
2747    
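          // Illustrative sketch (not part of the generated code): building the
          // message above with its generated builder, assuming the standard
          // protobuf 2.x pattern this file follows. `recoveredSegment` is a
          // hypothetical, fully populated SegmentStateProto; build() throws an
          // UninitializedMessageException if either required field is unset.
          //
          //   PersistedRecoveryPaxosData data = PersistedRecoveryPaxosData.newBuilder()
          //       .setSegmentState(recoveredSegment)
          //       .setAcceptedInEpoch(3L)
          //       .build();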
2748      public interface JournalRequestProtoOrBuilder
2749          extends com.google.protobuf.MessageOrBuilder {
2750    
2751        // required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1;
2752        /**
2753         * <code>required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1;</code>
2754         */
2755        boolean hasReqInfo();
2756        /**
2757         * <code>required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1;</code>
2758         */
2759        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo();
2760        /**
2761         * <code>required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1;</code>
2762         */
2763        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder();
2764    
2765        // required uint64 firstTxnId = 2;
2766        /**
2767         * <code>required uint64 firstTxnId = 2;</code>
2768         */
2769        boolean hasFirstTxnId();
2770        /**
2771         * <code>required uint64 firstTxnId = 2;</code>
2772         */
2773        long getFirstTxnId();
2774    
2775        // required uint32 numTxns = 3;
2776        /**
2777         * <code>required uint32 numTxns = 3;</code>
2778         */
2779        boolean hasNumTxns();
2780        /**
2781         * <code>required uint32 numTxns = 3;</code>
2782         */
2783        int getNumTxns();
2784    
2785        // required bytes records = 4;
2786        /**
2787         * <code>required bytes records = 4;</code>
2788         */
2789        boolean hasRecords();
2790        /**
2791         * <code>required bytes records = 4;</code>
2792         */
2793        com.google.protobuf.ByteString getRecords();
2794    
2795        // required uint64 segmentTxnId = 5;
2796        /**
2797         * <code>required uint64 segmentTxnId = 5;</code>
2798         */
2799        boolean hasSegmentTxnId();
2800        /**
2801         * <code>required uint64 segmentTxnId = 5;</code>
2802         */
2803        long getSegmentTxnId();
2804      }
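          // Note (not part of the generated code): as elsewhere in this file, the
          // *OrBuilder interface above collects the read-only accessors shared by
          // JournalRequestProto and its Builder, so callers can accept either a
          // built message or an in-progress builder behind one type.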
2805      /**
2806       * Protobuf type {@code hadoop.hdfs.qjournal.JournalRequestProto}
2807       */
2808      public static final class JournalRequestProto extends
2809          com.google.protobuf.GeneratedMessage
2810          implements JournalRequestProtoOrBuilder {
2811        // Use JournalRequestProto.newBuilder() to construct.
2812        private JournalRequestProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
2813          super(builder);
2814          this.unknownFields = builder.getUnknownFields();
2815        }
2816        private JournalRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
2817    
2818        private static final JournalRequestProto defaultInstance;
2819        public static JournalRequestProto getDefaultInstance() {
2820          return defaultInstance;
2821        }
2822    
2823        public JournalRequestProto getDefaultInstanceForType() {
2824          return defaultInstance;
2825        }
2826    
2827        private final com.google.protobuf.UnknownFieldSet unknownFields;
2828        @java.lang.Override
2829        public final com.google.protobuf.UnknownFieldSet
2830            getUnknownFields() {
2831          return this.unknownFields;
2832        }
2833        private JournalRequestProto(
2834            com.google.protobuf.CodedInputStream input,
2835            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
2836            throws com.google.protobuf.InvalidProtocolBufferException {
2837          initFields();
2838          int mutable_bitField0_ = 0;
2839          com.google.protobuf.UnknownFieldSet.Builder unknownFields =
2840              com.google.protobuf.UnknownFieldSet.newBuilder();
2841          try {
2842            boolean done = false;
2843            while (!done) {
2844              int tag = input.readTag();
2845              switch (tag) {
2846                case 0:
2847                  done = true;
2848                  break;
2849                default: {
2850                  if (!parseUnknownField(input, unknownFields,
2851                                         extensionRegistry, tag)) {
2852                    done = true;
2853                  }
2854                  break;
2855                }
2856                case 10: {
2857                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder subBuilder = null;
2858                  if (((bitField0_ & 0x00000001) == 0x00000001)) {
2859                    subBuilder = reqInfo_.toBuilder();
2860                  }
2861                  reqInfo_ = input.readMessage(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.PARSER, extensionRegistry);
2862                  if (subBuilder != null) {
2863                    subBuilder.mergeFrom(reqInfo_);
2864                    reqInfo_ = subBuilder.buildPartial();
2865                  }
2866                  bitField0_ |= 0x00000001;
2867                  break;
2868                }
2869                case 16: {
2870                  bitField0_ |= 0x00000002;
2871                  firstTxnId_ = input.readUInt64();
2872                  break;
2873                }
2874                case 24: {
2875                  bitField0_ |= 0x00000004;
2876                  numTxns_ = input.readUInt32();
2877                  break;
2878                }
2879                case 34: {
2880                  bitField0_ |= 0x00000008;
2881                  records_ = input.readBytes();
2882                  break;
2883                }
2884                case 40: {
2885                  bitField0_ |= 0x00000010;
2886                  segmentTxnId_ = input.readUInt64();
2887                  break;
2888                }
2889              }
2890            }
2891          } catch (com.google.protobuf.InvalidProtocolBufferException e) {
2892            throw e.setUnfinishedMessage(this);
2893          } catch (java.io.IOException e) {
2894            throw new com.google.protobuf.InvalidProtocolBufferException(
2895                e.getMessage()).setUnfinishedMessage(this);
2896          } finally {
2897            this.unknownFields = unknownFields.build();
2898            makeExtensionsImmutable();
2899          }
2900        }
2901        public static final com.google.protobuf.Descriptors.Descriptor
2902            getDescriptor() {
2903          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_JournalRequestProto_descriptor;
2904        }
2905    
2906        protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
2907            internalGetFieldAccessorTable() {
2908          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_JournalRequestProto_fieldAccessorTable
2909              .ensureFieldAccessorsInitialized(
2910                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto.Builder.class);
2911        }
2912    
2913        public static com.google.protobuf.Parser<JournalRequestProto> PARSER =
2914            new com.google.protobuf.AbstractParser<JournalRequestProto>() {
2915          public JournalRequestProto parsePartialFrom(
2916              com.google.protobuf.CodedInputStream input,
2917              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
2918              throws com.google.protobuf.InvalidProtocolBufferException {
2919            return new JournalRequestProto(input, extensionRegistry);
2920          }
2921        };
2922    
2923        @java.lang.Override
2924        public com.google.protobuf.Parser<JournalRequestProto> getParserForType() {
2925          return PARSER;
2926        }
2927    
2928        private int bitField0_;
2929        // required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1;
2930        public static final int REQINFO_FIELD_NUMBER = 1;
2931        private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto reqInfo_;
2932        /**
2933         * <code>required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1;</code>
2934         */
2935        public boolean hasReqInfo() {
2936          return ((bitField0_ & 0x00000001) == 0x00000001);
2937        }
2938        /**
2939         * <code>required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1;</code>
2940         */
2941        public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo() {
2942          return reqInfo_;
2943        }
2944        /**
2945         * <code>required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1;</code>
2946         */
2947        public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder() {
2948          return reqInfo_;
2949        }
2950    
2951        // required uint64 firstTxnId = 2;
2952        public static final int FIRSTTXNID_FIELD_NUMBER = 2;
2953        private long firstTxnId_;
2954        /**
2955         * <code>required uint64 firstTxnId = 2;</code>
2956         */
2957        public boolean hasFirstTxnId() {
2958          return ((bitField0_ & 0x00000002) == 0x00000002);
2959        }
2960        /**
2961         * <code>required uint64 firstTxnId = 2;</code>
2962         */
2963        public long getFirstTxnId() {
2964          return firstTxnId_;
2965        }
2966    
2967        // required uint32 numTxns = 3;
2968        public static final int NUMTXNS_FIELD_NUMBER = 3;
2969        private int numTxns_;
2970        /**
2971         * <code>required uint32 numTxns = 3;</code>
2972         */
2973        public boolean hasNumTxns() {
2974          return ((bitField0_ & 0x00000004) == 0x00000004);
2975        }
2976        /**
2977         * <code>required uint32 numTxns = 3;</code>
2978         */
2979        public int getNumTxns() {
2980          return numTxns_;
2981        }
2982    
2983        // required bytes records = 4;
2984        public static final int RECORDS_FIELD_NUMBER = 4;
2985        private com.google.protobuf.ByteString records_;
2986        /**
2987         * <code>required bytes records = 4;</code>
2988         */
2989        public boolean hasRecords() {
2990          return ((bitField0_ & 0x00000008) == 0x00000008);
2991        }
2992        /**
2993         * <code>required bytes records = 4;</code>
2994         */
2995        public com.google.protobuf.ByteString getRecords() {
2996          return records_;
2997        }
2998    
2999        // required uint64 segmentTxnId = 5;
3000        public static final int SEGMENTTXNID_FIELD_NUMBER = 5;
3001        private long segmentTxnId_;
3002        /**
3003         * <code>required uint64 segmentTxnId = 5;</code>
3004         */
3005        public boolean hasSegmentTxnId() {
3006          return ((bitField0_ & 0x00000010) == 0x00000010);
3007        }
3008        /**
3009         * <code>required uint64 segmentTxnId = 5;</code>
3010         */
3011        public long getSegmentTxnId() {
3012          return segmentTxnId_;
3013        }
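            // Note (not part of the generated code): under protobuf 2.x
            // required/optional semantics, each has*() accessor above reports
            // whether the field was present on the wire, while get*() returns the
            // value or the type's default. A minimal reading sketch, `req` being a
            // parsed message:
            //
            //   if (req.hasNumTxns()) {
            //     int n = req.getNumTxns();  // size of this batch of transactions
            //   }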
3014    
3015        private void initFields() {
3016          reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
3017          firstTxnId_ = 0L;
3018          numTxns_ = 0;
3019          records_ = com.google.protobuf.ByteString.EMPTY;
3020          segmentTxnId_ = 0L;
3021        }
3022        private byte memoizedIsInitialized = -1;
3023        public final boolean isInitialized() {
3024          byte isInitialized = memoizedIsInitialized;
3025          if (isInitialized != -1) return isInitialized == 1;
3026    
3027          if (!hasReqInfo()) {
3028            memoizedIsInitialized = 0;
3029            return false;
3030          }
3031          if (!hasFirstTxnId()) {
3032            memoizedIsInitialized = 0;
3033            return false;
3034          }
3035          if (!hasNumTxns()) {
3036            memoizedIsInitialized = 0;
3037            return false;
3038          }
3039          if (!hasRecords()) {
3040            memoizedIsInitialized = 0;
3041            return false;
3042          }
3043          if (!hasSegmentTxnId()) {
3044            memoizedIsInitialized = 0;
3045            return false;
3046          }
3047          if (!getReqInfo().isInitialized()) {
3048            memoizedIsInitialized = 0;
3049            return false;
3050          }
3051          memoizedIsInitialized = 1;
3052          return true;
3053        }
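            // Note (not part of the generated code): isInitialized() above checks
            // all five required fields plus the nested reqInfo message, and caches
            // the verdict in memoizedIsInitialized so later calls are O(1).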
3054    
3055        public void writeTo(com.google.protobuf.CodedOutputStream output)
3056                            throws java.io.IOException {
3057          getSerializedSize();
3058          if (((bitField0_ & 0x00000001) == 0x00000001)) {
3059            output.writeMessage(1, reqInfo_);
3060          }
3061          if (((bitField0_ & 0x00000002) == 0x00000002)) {
3062            output.writeUInt64(2, firstTxnId_);
3063          }
3064          if (((bitField0_ & 0x00000004) == 0x00000004)) {
3065            output.writeUInt32(3, numTxns_);
3066          }
3067          if (((bitField0_ & 0x00000008) == 0x00000008)) {
3068            output.writeBytes(4, records_);
3069          }
3070          if (((bitField0_ & 0x00000010) == 0x00000010)) {
3071            output.writeUInt64(5, segmentTxnId_);
3072          }
3073          getUnknownFields().writeTo(output);
3074        }
3075    
3076        private int memoizedSerializedSize = -1;
3077        public int getSerializedSize() {
3078          int size = memoizedSerializedSize;
3079          if (size != -1) return size;
3080    
3081          size = 0;
3082          if (((bitField0_ & 0x00000001) == 0x00000001)) {
3083            size += com.google.protobuf.CodedOutputStream
3084              .computeMessageSize(1, reqInfo_);
3085          }
3086          if (((bitField0_ & 0x00000002) == 0x00000002)) {
3087            size += com.google.protobuf.CodedOutputStream
3088              .computeUInt64Size(2, firstTxnId_);
3089          }
3090          if (((bitField0_ & 0x00000004) == 0x00000004)) {
3091            size += com.google.protobuf.CodedOutputStream
3092              .computeUInt32Size(3, numTxns_);
3093          }
3094          if (((bitField0_ & 0x00000008) == 0x00000008)) {
3095            size += com.google.protobuf.CodedOutputStream
3096              .computeBytesSize(4, records_);
3097          }
3098          if (((bitField0_ & 0x00000010) == 0x00000010)) {
3099            size += com.google.protobuf.CodedOutputStream
3100              .computeUInt64Size(5, segmentTxnId_);
3101          }
3102          size += getUnknownFields().getSerializedSize();
3103          memoizedSerializedSize = size;
3104          return size;
3105        }
3106    
3107        private static final long serialVersionUID = 0L;
3108        @java.lang.Override
3109        protected java.lang.Object writeReplace()
3110            throws java.io.ObjectStreamException {
3111          return super.writeReplace();
3112        }
3113    
3114        @java.lang.Override
3115        public boolean equals(final java.lang.Object obj) {
3116          if (obj == this) {
3117            return true;
3118          }
3119          if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto)) {
3120            return super.equals(obj);
3121          }
3122          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto) obj;
3123    
3124          boolean result = true;
3125          result = result && (hasReqInfo() == other.hasReqInfo());
3126          if (hasReqInfo()) {
3127            result = result && getReqInfo()
3128                .equals(other.getReqInfo());
3129          }
3130          result = result && (hasFirstTxnId() == other.hasFirstTxnId());
3131          if (hasFirstTxnId()) {
3132            result = result && (getFirstTxnId()
3133                == other.getFirstTxnId());
3134          }
3135          result = result && (hasNumTxns() == other.hasNumTxns());
3136          if (hasNumTxns()) {
3137            result = result && (getNumTxns()
3138                == other.getNumTxns());
3139          }
3140          result = result && (hasRecords() == other.hasRecords());
3141          if (hasRecords()) {
3142            result = result && getRecords()
3143                .equals(other.getRecords());
3144          }
3145          result = result && (hasSegmentTxnId() == other.hasSegmentTxnId());
3146          if (hasSegmentTxnId()) {
3147            result = result && (getSegmentTxnId()
3148                == other.getSegmentTxnId());
3149          }
3150          result = result &&
3151              getUnknownFields().equals(other.getUnknownFields());
3152          return result;
3153        }
3154    
3155        private int memoizedHashCode = 0;
3156        @java.lang.Override
3157        public int hashCode() {
3158          if (memoizedHashCode != 0) {
3159            return memoizedHashCode;
3160          }
3161          int hash = 41;
3162          hash = (19 * hash) + getDescriptorForType().hashCode();
3163          if (hasReqInfo()) {
3164            hash = (37 * hash) + REQINFO_FIELD_NUMBER;
3165            hash = (53 * hash) + getReqInfo().hashCode();
3166          }
3167          if (hasFirstTxnId()) {
3168            hash = (37 * hash) + FIRSTTXNID_FIELD_NUMBER;
3169            hash = (53 * hash) + hashLong(getFirstTxnId());
3170          }
3171          if (hasNumTxns()) {
3172            hash = (37 * hash) + NUMTXNS_FIELD_NUMBER;
3173            hash = (53 * hash) + getNumTxns();
3174          }
3175          if (hasRecords()) {
3176            hash = (37 * hash) + RECORDS_FIELD_NUMBER;
3177            hash = (53 * hash) + getRecords().hashCode();
3178          }
3179          if (hasSegmentTxnId()) {
3180            hash = (37 * hash) + SEGMENTTXNID_FIELD_NUMBER;
3181            hash = (53 * hash) + hashLong(getSegmentTxnId());
3182          }
3183          hash = (29 * hash) + getUnknownFields().hashCode();
3184          memoizedHashCode = hash;
3185          return hash;
3186        }
3187    
3188        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto parseFrom(
3189            com.google.protobuf.ByteString data)
3190            throws com.google.protobuf.InvalidProtocolBufferException {
3191          return PARSER.parseFrom(data);
3192        }
3193        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto parseFrom(
3194            com.google.protobuf.ByteString data,
3195            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
3196            throws com.google.protobuf.InvalidProtocolBufferException {
3197          return PARSER.parseFrom(data, extensionRegistry);
3198        }
3199        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto parseFrom(byte[] data)
3200            throws com.google.protobuf.InvalidProtocolBufferException {
3201          return PARSER.parseFrom(data);
3202        }
3203        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto parseFrom(
3204            byte[] data,
3205            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
3206            throws com.google.protobuf.InvalidProtocolBufferException {
3207          return PARSER.parseFrom(data, extensionRegistry);
3208        }
3209        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto parseFrom(java.io.InputStream input)
3210            throws java.io.IOException {
3211          return PARSER.parseFrom(input);
3212        }
3213        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto parseFrom(
3214            java.io.InputStream input,
3215            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
3216            throws java.io.IOException {
3217          return PARSER.parseFrom(input, extensionRegistry);
3218        }
3219        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto parseDelimitedFrom(java.io.InputStream input)
3220            throws java.io.IOException {
3221          return PARSER.parseDelimitedFrom(input);
3222        }
3223        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto parseDelimitedFrom(
3224            java.io.InputStream input,
3225            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
3226            throws java.io.IOException {
3227          return PARSER.parseDelimitedFrom(input, extensionRegistry);
3228        }
3229        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto parseFrom(
3230            com.google.protobuf.CodedInputStream input)
3231            throws java.io.IOException {
3232          return PARSER.parseFrom(input);
3233        }
3234        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto parseFrom(
3235            com.google.protobuf.CodedInputStream input,
3236            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
3237            throws java.io.IOException {
3238          return PARSER.parseFrom(input, extensionRegistry);
3239        }
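            // Illustrative sketch (not part of the generated code): deserializing
            // a request with the overloads above; `bytes` stands in for a
            // serialized JournalRequestProto.
            //
            //   JournalRequestProto req = JournalRequestProto.parseFrom(bytes);
            //   long firstTxn = req.getFirstTxnId();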
3240    
3241        public static Builder newBuilder() { return Builder.create(); }
3242        public Builder newBuilderForType() { return newBuilder(); }
3243        public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto prototype) {
3244          return newBuilder().mergeFrom(prototype);
3245        }
3246        public Builder toBuilder() { return newBuilder(this); }
3247    
3248        @java.lang.Override
3249        protected Builder newBuilderForType(
3250            com.google.protobuf.GeneratedMessage.BuilderParent parent) {
3251          Builder builder = new Builder(parent);
3252          return builder;
3253        }
3254        /**
3255         * Protobuf type {@code hadoop.hdfs.qjournal.JournalRequestProto}
3256         */
3257        public static final class Builder extends
3258            com.google.protobuf.GeneratedMessage.Builder<Builder>
3259           implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProtoOrBuilder {
3260          public static final com.google.protobuf.Descriptors.Descriptor
3261              getDescriptor() {
3262            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_JournalRequestProto_descriptor;
3263          }
3264    
3265          protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
3266              internalGetFieldAccessorTable() {
3267            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_JournalRequestProto_fieldAccessorTable
3268                .ensureFieldAccessorsInitialized(
3269                    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto.Builder.class);
3270          }
3271    
3272          // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto.newBuilder()
3273          private Builder() {
3274            maybeForceBuilderInitialization();
3275          }
3276    
3277          private Builder(
3278              com.google.protobuf.GeneratedMessage.BuilderParent parent) {
3279            super(parent);
3280            maybeForceBuilderInitialization();
3281          }
3282          private void maybeForceBuilderInitialization() {
3283            if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
3284              getReqInfoFieldBuilder();
3285            }
3286          }
3287          private static Builder create() {
3288            return new Builder();
3289          }
3290    
3291          public Builder clear() {
3292            super.clear();
3293            if (reqInfoBuilder_ == null) {
3294              reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
3295            } else {
3296              reqInfoBuilder_.clear();
3297            }
3298            bitField0_ = (bitField0_ & ~0x00000001);
3299            firstTxnId_ = 0L;
3300            bitField0_ = (bitField0_ & ~0x00000002);
3301            numTxns_ = 0;
3302            bitField0_ = (bitField0_ & ~0x00000004);
3303            records_ = com.google.protobuf.ByteString.EMPTY;
3304            bitField0_ = (bitField0_ & ~0x00000008);
3305            segmentTxnId_ = 0L;
3306            bitField0_ = (bitField0_ & ~0x00000010);
3307            return this;
3308          }
3309    
3310          public Builder clone() {
3311            return create().mergeFrom(buildPartial());
3312          }
3313    
3314          public com.google.protobuf.Descriptors.Descriptor
3315              getDescriptorForType() {
3316            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_JournalRequestProto_descriptor;
3317          }
3318    
3319          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto getDefaultInstanceForType() {
3320            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto.getDefaultInstance();
3321          }
3322    
3323          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto build() {
3324            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto result = buildPartial();
3325            if (!result.isInitialized()) {
3326              throw newUninitializedMessageException(result);
3327            }
3328            return result;
3329          }
3330    
3331          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto buildPartial() {
3332            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto(this);
3333            int from_bitField0_ = bitField0_;
3334            int to_bitField0_ = 0;
3335            if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
3336              to_bitField0_ |= 0x00000001;
3337            }
3338            if (reqInfoBuilder_ == null) {
3339              result.reqInfo_ = reqInfo_;
3340            } else {
3341              result.reqInfo_ = reqInfoBuilder_.build();
3342            }
3343            if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
3344              to_bitField0_ |= 0x00000002;
3345            }
3346            result.firstTxnId_ = firstTxnId_;
3347            if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
3348              to_bitField0_ |= 0x00000004;
3349            }
3350            result.numTxns_ = numTxns_;
3351            if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
3352              to_bitField0_ |= 0x00000008;
3353            }
3354            result.records_ = records_;
3355            if (((from_bitField0_ & 0x00000010) == 0x00000010)) {
3356              to_bitField0_ |= 0x00000010;
3357            }
3358            result.segmentTxnId_ = segmentTxnId_;
3359            result.bitField0_ = to_bitField0_;
3360            onBuilt();
3361            return result;
3362          }
3363    
3364          public Builder mergeFrom(com.google.protobuf.Message other) {
3365            if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto) {
3366              return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto)other);
3367            } else {
3368              super.mergeFrom(other);
3369              return this;
3370            }
3371          }
3372    
3373          public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto other) {
3374            if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto.getDefaultInstance()) return this;
3375            if (other.hasReqInfo()) {
3376              mergeReqInfo(other.getReqInfo());
3377            }
3378            if (other.hasFirstTxnId()) {
3379              setFirstTxnId(other.getFirstTxnId());
3380            }
3381            if (other.hasNumTxns()) {
3382              setNumTxns(other.getNumTxns());
3383            }
3384            if (other.hasRecords()) {
3385              setRecords(other.getRecords());
3386            }
3387            if (other.hasSegmentTxnId()) {
3388              setSegmentTxnId(other.getSegmentTxnId());
3389            }
3390            this.mergeUnknownFields(other.getUnknownFields());
3391            return this;
3392          }
3393    
3394          public final boolean isInitialized() {
3395            if (!hasReqInfo()) {
3397              return false;
3398            }
3399            if (!hasFirstTxnId()) {
3401              return false;
3402            }
3403            if (!hasNumTxns()) {
3405              return false;
3406            }
3407            if (!hasRecords()) {
3409              return false;
3410            }
3411            if (!hasSegmentTxnId()) {
3413              return false;
3414            }
3415            if (!getReqInfo().isInitialized()) {
3417              return false;
3418            }
3419            return true;
3420          }
3421    
3422          public Builder mergeFrom(
3423              com.google.protobuf.CodedInputStream input,
3424              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
3425              throws java.io.IOException {
3426            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto parsedMessage = null;
3427            try {
3428              parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
3429            } catch (com.google.protobuf.InvalidProtocolBufferException e) {
3430              parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto) e.getUnfinishedMessage();
3431              throw e;
3432            } finally {
3433              if (parsedMessage != null) {
3434                mergeFrom(parsedMessage);
3435              }
3436            }
3437            return this;
3438          }
3439          private int bitField0_;
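              // Note (not part of the generated code): bitField0_ tracks which
              // fields have been set on this builder, one bit per field in
              // declaration order: 0x01 reqInfo, 0x02 firstTxnId, 0x04 numTxns,
              // 0x08 records, 0x10 segmentTxnId.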
3440    
3441          // required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1;
3442          private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
3443          private com.google.protobuf.SingleFieldBuilder<
3444              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder> reqInfoBuilder_;
3445          /**
3446           * <code>required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1;</code>
3447           */
3448          public boolean hasReqInfo() {
3449            return ((bitField0_ & 0x00000001) == 0x00000001);
3450          }
3451          /**
3452           * <code>required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1;</code>
3453           */
3454          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo() {
3455            if (reqInfoBuilder_ == null) {
3456              return reqInfo_;
3457            } else {
3458              return reqInfoBuilder_.getMessage();
3459            }
3460          }
3461          /**
3462           * <code>required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1;</code>
3463           */
3464          public Builder setReqInfo(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto value) {
3465            if (reqInfoBuilder_ == null) {
3466              if (value == null) {
3467                throw new NullPointerException();
3468              }
3469              reqInfo_ = value;
3470              onChanged();
3471            } else {
3472              reqInfoBuilder_.setMessage(value);
3473            }
3474            bitField0_ |= 0x00000001;
3475            return this;
3476          }
3477          /**
3478           * <code>required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1;</code>
3479           */
3480          public Builder setReqInfo(
3481              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder builderForValue) {
3482            if (reqInfoBuilder_ == null) {
3483              reqInfo_ = builderForValue.build();
3484              onChanged();
3485            } else {
3486              reqInfoBuilder_.setMessage(builderForValue.build());
3487            }
3488            bitField0_ |= 0x00000001;
3489            return this;
3490          }
3491          /**
3492           * <code>required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1;</code>
3493           */
3494          public Builder mergeReqInfo(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto value) {
3495            if (reqInfoBuilder_ == null) {
3496              if (((bitField0_ & 0x00000001) == 0x00000001) &&
3497                  reqInfo_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance()) {
3498                reqInfo_ =
3499                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.newBuilder(reqInfo_).mergeFrom(value).buildPartial();
3500              } else {
3501                reqInfo_ = value;
3502              }
3503              onChanged();
3504            } else {
3505              reqInfoBuilder_.mergeFrom(value);
3506            }
3507            bitField0_ |= 0x00000001;
3508            return this;
3509          }
3510          /**
3511           * <code>required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1;</code>
3512           */
3513          public Builder clearReqInfo() {
3514            if (reqInfoBuilder_ == null) {
3515              reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
3516              onChanged();
3517            } else {
3518              reqInfoBuilder_.clear();
3519            }
3520            bitField0_ = (bitField0_ & ~0x00000001);
3521            return this;
3522          }
3523          /**
3524           * <code>required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1;</code>
3525           */
3526          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder getReqInfoBuilder() {
3527            bitField0_ |= 0x00000001;
3528            onChanged();
3529            return getReqInfoFieldBuilder().getBuilder();
3530          }
3531          /**
3532           * <code>required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1;</code>
3533           */
3534          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder() {
3535            if (reqInfoBuilder_ != null) {
3536              return reqInfoBuilder_.getMessageOrBuilder();
3537            } else {
3538              return reqInfo_;
3539            }
3540          }
3541          /**
3542           * <code>required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1;</code>
3543           */
3544          private com.google.protobuf.SingleFieldBuilder<
3545              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder> 
3546              getReqInfoFieldBuilder() {
3547            if (reqInfoBuilder_ == null) {
3548              reqInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder<
3549                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder>(
3550                      reqInfo_,
3551                      getParentForChildren(),
3552                      isClean());
3553              reqInfo_ = null;
3554            }
3555            return reqInfoBuilder_;
3556          }
3557    
3558          // required uint64 firstTxnId = 2;
3559          private long firstTxnId_ ;
3560          /**
3561           * <code>required uint64 firstTxnId = 2;</code>
3562           */
3563          public boolean hasFirstTxnId() {
3564            return ((bitField0_ & 0x00000002) == 0x00000002);
3565          }
3566          /**
3567           * <code>required uint64 firstTxnId = 2;</code>
3568           */
3569          public long getFirstTxnId() {
3570            return firstTxnId_;
3571          }
3572          /**
3573           * <code>required uint64 firstTxnId = 2;</code>
3574           */
3575          public Builder setFirstTxnId(long value) {
3576            bitField0_ |= 0x00000002;
3577            firstTxnId_ = value;
3578            onChanged();
3579            return this;
3580          }
3581          /**
3582           * <code>required uint64 firstTxnId = 2;</code>
3583           */
3584          public Builder clearFirstTxnId() {
3585            bitField0_ = (bitField0_ & ~0x00000002);
3586            firstTxnId_ = 0L;
3587            onChanged();
3588            return this;
3589          }
3590    
3591          // required uint32 numTxns = 3;
3592          private int numTxns_ ;
3593          /**
3594           * <code>required uint32 numTxns = 3;</code>
3595           */
3596          public boolean hasNumTxns() {
3597            return ((bitField0_ & 0x00000004) == 0x00000004);
3598          }
3599          /**
3600           * <code>required uint32 numTxns = 3;</code>
3601           */
3602          public int getNumTxns() {
3603            return numTxns_;
3604          }
3605          /**
3606           * <code>required uint32 numTxns = 3;</code>
3607           */
3608          public Builder setNumTxns(int value) {
3609            bitField0_ |= 0x00000004;
3610            numTxns_ = value;
3611            onChanged();
3612            return this;
3613          }
3614          /**
3615           * <code>required uint32 numTxns = 3;</code>
3616           */
3617          public Builder clearNumTxns() {
3618            bitField0_ = (bitField0_ & ~0x00000004);
3619            numTxns_ = 0;
3620            onChanged();
3621            return this;
3622          }
3623    
3624          // required bytes records = 4;
3625          private com.google.protobuf.ByteString records_ = com.google.protobuf.ByteString.EMPTY;
3626          /**
3627           * <code>required bytes records = 4;</code>
3628           */
3629          public boolean hasRecords() {
3630            return ((bitField0_ & 0x00000008) == 0x00000008);
3631          }
3632          /**
3633           * <code>required bytes records = 4;</code>
3634           */
3635          public com.google.protobuf.ByteString getRecords() {
3636            return records_;
3637          }
3638          /**
3639           * <code>required bytes records = 4;</code>
3640           */
3641          public Builder setRecords(com.google.protobuf.ByteString value) {
3642            if (value == null) {
3643              throw new NullPointerException();
3644            }
3645            bitField0_ |= 0x00000008;
3646            records_ = value;
3647            onChanged();
3648            return this;
3649          }
3650          /**
3651           * <code>required bytes records = 4;</code>
3652           */
3653          public Builder clearRecords() {
3654            bitField0_ = (bitField0_ & ~0x00000008);
3655            records_ = getDefaultInstance().getRecords();
3656            onChanged();
3657            return this;
3658          }
3659    
3660          // required uint64 segmentTxnId = 5;
3661          private long segmentTxnId_ ;
3662          /**
3663           * <code>required uint64 segmentTxnId = 5;</code>
3664           */
3665          public boolean hasSegmentTxnId() {
3666            return ((bitField0_ & 0x00000010) == 0x00000010);
3667          }
3668          /**
3669           * <code>required uint64 segmentTxnId = 5;</code>
3670           */
3671          public long getSegmentTxnId() {
3672            return segmentTxnId_;
3673          }
3674          /**
3675           * <code>required uint64 segmentTxnId = 5;</code>
3676           */
3677          public Builder setSegmentTxnId(long value) {
3678            bitField0_ |= 0x00000010;
3679            segmentTxnId_ = value;
3680            onChanged();
3681            return this;
3682          }
3683          /**
3684           * <code>required uint64 segmentTxnId = 5;</code>
3685           */
3686          public Builder clearSegmentTxnId() {
3687            bitField0_ = (bitField0_ & ~0x00000010);
3688            segmentTxnId_ = 0L;
3689            onChanged();
3690            return this;
3691          }
3692    
3693          // @@protoc_insertion_point(builder_scope:hadoop.hdfs.qjournal.JournalRequestProto)
3694        }
3695    
3696        static {
3697          defaultInstance = new JournalRequestProto(true);
3698          defaultInstance.initFields();
3699        }
3700    
3701        // @@protoc_insertion_point(class_scope:hadoop.hdfs.qjournal.JournalRequestProto)
3702      }
3703    
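          // Illustrative sketch (not part of the generated code): assembling a
          // journal request with the builder above. `someReqInfo` and `editBytes`
          // are hypothetical placeholders; all five fields are required, so
          // build() fails unless every setter has been called.
          //
          //   JournalRequestProto req = JournalRequestProto.newBuilder()
          //       .setReqInfo(someReqInfo)  // a populated RequestInfoProto
          //       .setFirstTxnId(101L)
          //       .setNumTxns(2)
          //       .setRecords(com.google.protobuf.ByteString.copyFrom(editBytes))
          //       .setSegmentTxnId(100L)
          //       .build();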
3704      public interface JournalResponseProtoOrBuilder
3705          extends com.google.protobuf.MessageOrBuilder {
3706      }
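          // Note (not part of the generated code): JournalResponseProto declares
          // no fields, so it appears to act as a bare acknowledgement; only
          // unknown fields read from the wire are carried through.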
  /**
   * Protobuf type {@code hadoop.hdfs.qjournal.JournalResponseProto}
   */
  public static final class JournalResponseProto extends
      com.google.protobuf.GeneratedMessage
      implements JournalResponseProtoOrBuilder {
    // Use JournalResponseProto.newBuilder() to construct.
    private JournalResponseProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
      super(builder);
      this.unknownFields = builder.getUnknownFields();
    }
    private JournalResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }

    private static final JournalResponseProto defaultInstance;
    public static JournalResponseProto getDefaultInstance() {
      return defaultInstance;
    }

    public JournalResponseProto getDefaultInstanceForType() {
      return defaultInstance;
    }

    private final com.google.protobuf.UnknownFieldSet unknownFields;
    @java.lang.Override
    public final com.google.protobuf.UnknownFieldSet
        getUnknownFields() {
      return this.unknownFields;
    }
    private JournalResponseProto(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      initFields();
      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
          com.google.protobuf.UnknownFieldSet.newBuilder();
      try {
        boolean done = false;
        while (!done) {
          int tag = input.readTag();
          switch (tag) {
            case 0:
              done = true;
              break;
            default: {
              if (!parseUnknownField(input, unknownFields,
                                     extensionRegistry, tag)) {
                done = true;
              }
              break;
            }
          }
        }
      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
        throw e.setUnfinishedMessage(this);
      } catch (java.io.IOException e) {
        throw new com.google.protobuf.InvalidProtocolBufferException(
            e.getMessage()).setUnfinishedMessage(this);
      } finally {
        this.unknownFields = unknownFields.build();
        makeExtensionsImmutable();
      }
    }
    public static final com.google.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_JournalResponseProto_descriptor;
    }

    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_JournalResponseProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto.Builder.class);
    }

    public static com.google.protobuf.Parser<JournalResponseProto> PARSER =
        new com.google.protobuf.AbstractParser<JournalResponseProto>() {
      public JournalResponseProto parsePartialFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws com.google.protobuf.InvalidProtocolBufferException {
        return new JournalResponseProto(input, extensionRegistry);
      }
    };
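    // PARSER is stateless: every parsePartialFrom() call delegates to the
    // private parsing constructor above, so nothing is shared between parses.
    // A minimal usage sketch (assuming `bytes` holds a serialized
    // JournalResponseProto):
    //
    //   JournalResponseProto resp = JournalResponseProto.PARSER.parseFrom(bytes);
    //
    // The static parseFrom() convenience overloads below all route through it.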

    @java.lang.Override
    public com.google.protobuf.Parser<JournalResponseProto> getParserForType() {
      return PARSER;
    }

    private void initFields() {
    }
    private byte memoizedIsInitialized = -1;
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized != -1) return isInitialized == 1;

      memoizedIsInitialized = 1;
      return true;
    }

    public void writeTo(com.google.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      getSerializedSize();
      getUnknownFields().writeTo(output);
    }

    private int memoizedSerializedSize = -1;
    public int getSerializedSize() {
      int size = memoizedSerializedSize;
      if (size != -1) return size;

      size = 0;
      size += getUnknownFields().getSerializedSize();
      memoizedSerializedSize = size;
      return size;
    }

    private static final long serialVersionUID = 0L;
    @java.lang.Override
    protected java.lang.Object writeReplace()
        throws java.io.ObjectStreamException {
      return super.writeReplace();
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto) obj;

      boolean result = true;
      result = result &&
          getUnknownFields().equals(other.getUnknownFields());
      return result;
    }

    private int memoizedHashCode = 0;
    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptorForType().hashCode();
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }
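    // Both caches above use an "uncomputed" sentinel: memoizedIsInitialized
    // starts at -1 and memoizedHashCode at 0. If a real hash ever equaled 0
    // it would simply be recomputed on each call, which is correct, merely
    // slower.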

    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto parseFrom(
        com.google.protobuf.ByteString data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto parseFrom(
        com.google.protobuf.ByteString data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto parseFrom(byte[] data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto parseFrom(
        byte[] data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto parseFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto parseDelimitedFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto parseFrom(
        com.google.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto parseFrom(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }
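    // The parseFrom() family accepts ByteString, byte[], InputStream, and
    // CodedInputStream sources, each with an optional ExtensionRegistryLite.
    // The parseDelimitedFrom() variants additionally expect a varint length
    // prefix before the message, the standard framing for writing several
    // messages to one stream.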

    public static Builder newBuilder() { return Builder.create(); }
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto prototype) {
      return newBuilder().mergeFrom(prototype);
    }
    public Builder toBuilder() { return newBuilder(this); }

    @java.lang.Override
    protected Builder newBuilderForType(
        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * Protobuf type {@code hadoop.hdfs.qjournal.JournalResponseProto}
     */
    public static final class Builder extends
        com.google.protobuf.GeneratedMessage.Builder<Builder>
       implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProtoOrBuilder {
      public static final com.google.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_JournalResponseProto_descriptor;
      }

      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_JournalResponseProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }

      private Builder(
          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      private void maybeForceBuilderInitialization() {
        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
        }
      }
      private static Builder create() {
        return new Builder();
      }

      public Builder clear() {
        super.clear();
        return this;
      }

      public Builder clone() {
        return create().mergeFrom(buildPartial());
      }

      public com.google.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_JournalResponseProto_descriptor;
      }

      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto.getDefaultInstance();
      }

      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto build() {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto buildPartial() {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto(this);
        onBuilt();
        return result;
      }

      public Builder mergeFrom(com.google.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto) {
          return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto other) {
        if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto.getDefaultInstance()) return this;
        this.mergeUnknownFields(other.getUnknownFields());
        return this;
      }

      public final boolean isInitialized() {
        return true;
      }

      public Builder mergeFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto parsedMessage = null;
        try {
          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
          parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto) e.getUnfinishedMessage();
          throw e;
        } finally {
          if (parsedMessage != null) {
            mergeFrom(parsedMessage);
          }
        }
        return this;
      }

      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.qjournal.JournalResponseProto)
    }

    static {
      defaultInstance = new JournalResponseProto(true);
      defaultInstance.initFields();
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.qjournal.JournalResponseProto)
  }

  public interface HeartbeatRequestProtoOrBuilder
      extends com.google.protobuf.MessageOrBuilder {

    // required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1;
    /**
     * <code>required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1;</code>
     */
    boolean hasReqInfo();
    /**
     * <code>required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1;</code>
     */
    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo();
    /**
     * <code>required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1;</code>
     */
    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder();
  }
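  // HeartbeatRequestProto wraps only the common RequestInfoProto header. A
  // minimal construction sketch (assuming reqInfo is an already populated
  // RequestInfoProto, whose own fields are defined elsewhere in this file):
  //
  //   HeartbeatRequestProto req = HeartbeatRequestProto.newBuilder()
  //       .setReqInfo(reqInfo)
  //       .build();
  //
  // build() throws if reqInfo is unset, because the field is declared
  // required in QJournalProtocol.proto.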
  /**
   * Protobuf type {@code hadoop.hdfs.qjournal.HeartbeatRequestProto}
   */
  public static final class HeartbeatRequestProto extends
      com.google.protobuf.GeneratedMessage
      implements HeartbeatRequestProtoOrBuilder {
    // Use HeartbeatRequestProto.newBuilder() to construct.
    private HeartbeatRequestProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
      super(builder);
      this.unknownFields = builder.getUnknownFields();
    }
    private HeartbeatRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }

    private static final HeartbeatRequestProto defaultInstance;
    public static HeartbeatRequestProto getDefaultInstance() {
      return defaultInstance;
    }

    public HeartbeatRequestProto getDefaultInstanceForType() {
      return defaultInstance;
    }

    private final com.google.protobuf.UnknownFieldSet unknownFields;
    @java.lang.Override
    public final com.google.protobuf.UnknownFieldSet
        getUnknownFields() {
      return this.unknownFields;
    }
    private HeartbeatRequestProto(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      initFields();
      int mutable_bitField0_ = 0;
      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
          com.google.protobuf.UnknownFieldSet.newBuilder();
      try {
        boolean done = false;
        while (!done) {
          int tag = input.readTag();
          switch (tag) {
            case 0:
              done = true;
              break;
            default: {
              if (!parseUnknownField(input, unknownFields,
                                     extensionRegistry, tag)) {
                done = true;
              }
              break;
            }
            case 10: {
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder subBuilder = null;
              if (((bitField0_ & 0x00000001) == 0x00000001)) {
                subBuilder = reqInfo_.toBuilder();
              }
              reqInfo_ = input.readMessage(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.PARSER, extensionRegistry);
              if (subBuilder != null) {
                subBuilder.mergeFrom(reqInfo_);
                reqInfo_ = subBuilder.buildPartial();
              }
              bitField0_ |= 0x00000001;
              break;
            }
          }
        }
      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
        throw e.setUnfinishedMessage(this);
      } catch (java.io.IOException e) {
        throw new com.google.protobuf.InvalidProtocolBufferException(
            e.getMessage()).setUnfinishedMessage(this);
      } finally {
        this.unknownFields = unknownFields.build();
        makeExtensionsImmutable();
      }
    }
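    // In the switch above, tag 10 encodes field number 1 (reqInfo) with wire
    // type 2 (length-delimited). If reqInfo appears more than once on the
    // wire, the previously read value is turned back into a builder and the
    // new occurrence is merged into it, which is protobuf's standard
    // last-occurrence-merges semantics for singular message fields.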
    public static final com.google.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_HeartbeatRequestProto_descriptor;
    }

    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_HeartbeatRequestProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto.Builder.class);
    }

    public static com.google.protobuf.Parser<HeartbeatRequestProto> PARSER =
        new com.google.protobuf.AbstractParser<HeartbeatRequestProto>() {
      public HeartbeatRequestProto parsePartialFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws com.google.protobuf.InvalidProtocolBufferException {
        return new HeartbeatRequestProto(input, extensionRegistry);
      }
    };

    @java.lang.Override
    public com.google.protobuf.Parser<HeartbeatRequestProto> getParserForType() {
      return PARSER;
    }

    private int bitField0_;
    // required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1;
    public static final int REQINFO_FIELD_NUMBER = 1;
    private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto reqInfo_;
    /**
     * <code>required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1;</code>
     */
    public boolean hasReqInfo() {
      return ((bitField0_ & 0x00000001) == 0x00000001);
    }
    /**
     * <code>required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1;</code>
     */
    public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo() {
      return reqInfo_;
    }
    /**
     * <code>required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1;</code>
     */
    public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder() {
      return reqInfo_;
    }

    private void initFields() {
      reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
    }
    private byte memoizedIsInitialized = -1;
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized != -1) return isInitialized == 1;

      if (!hasReqInfo()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!getReqInfo().isInitialized()) {
        memoizedIsInitialized = 0;
        return false;
      }
      memoizedIsInitialized = 1;
      return true;
    }
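    // memoizedIsInitialized caches the required-field check: -1 means not yet
    // computed, 0 known-incomplete, 1 known-complete. Note that the check
    // recurses into reqInfo, so the request is complete only once every
    // required field of the nested RequestInfoProto is set as well.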

    public void writeTo(com.google.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      getSerializedSize();
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        output.writeMessage(1, reqInfo_);
      }
      getUnknownFields().writeTo(output);
    }

    private int memoizedSerializedSize = -1;
    public int getSerializedSize() {
      int size = memoizedSerializedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        size += com.google.protobuf.CodedOutputStream
          .computeMessageSize(1, reqInfo_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSerializedSize = size;
      return size;
    }

    private static final long serialVersionUID = 0L;
    @java.lang.Override
    protected java.lang.Object writeReplace()
        throws java.io.ObjectStreamException {
      return super.writeReplace();
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto) obj;

      boolean result = true;
      result = result && (hasReqInfo() == other.hasReqInfo());
      if (hasReqInfo()) {
        result = result && getReqInfo()
            .equals(other.getReqInfo());
      }
      result = result &&
          getUnknownFields().equals(other.getUnknownFields());
      return result;
    }

    private int memoizedHashCode = 0;
    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptorForType().hashCode();
      if (hasReqInfo()) {
        hash = (37 * hash) + REQINFO_FIELD_NUMBER;
        hash = (53 * hash) + getReqInfo().hashCode();
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto parseFrom(
        com.google.protobuf.ByteString data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto parseFrom(
        com.google.protobuf.ByteString data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto parseFrom(byte[] data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto parseFrom(
        byte[] data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto parseFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto parseDelimitedFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto parseFrom(
        com.google.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto parseFrom(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }

    public static Builder newBuilder() { return Builder.create(); }
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto prototype) {
      return newBuilder().mergeFrom(prototype);
    }
    public Builder toBuilder() { return newBuilder(this); }

    @java.lang.Override
    protected Builder newBuilderForType(
        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * Protobuf type {@code hadoop.hdfs.qjournal.HeartbeatRequestProto}
     */
    public static final class Builder extends
        com.google.protobuf.GeneratedMessage.Builder<Builder>
       implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProtoOrBuilder {
      public static final com.google.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_HeartbeatRequestProto_descriptor;
      }

      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_HeartbeatRequestProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }

      private Builder(
          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      private void maybeForceBuilderInitialization() {
        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
          getReqInfoFieldBuilder();
        }
      }
      private static Builder create() {
        return new Builder();
      }

      public Builder clear() {
        super.clear();
        if (reqInfoBuilder_ == null) {
          reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
        } else {
          reqInfoBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000001);
        return this;
      }

      public Builder clone() {
        return create().mergeFrom(buildPartial());
      }

      public com.google.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_HeartbeatRequestProto_descriptor;
      }

      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto.getDefaultInstance();
      }

      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto build() {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto buildPartial() {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto(this);
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
          to_bitField0_ |= 0x00000001;
        }
        if (reqInfoBuilder_ == null) {
          result.reqInfo_ = reqInfo_;
        } else {
          result.reqInfo_ = reqInfoBuilder_.build();
        }
        result.bitField0_ = to_bitField0_;
        onBuilt();
        return result;
      }
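      // buildPartial() translates the builder's has-bits (from_bitField0_)
      // into the message's own bitField0_ (to_bitField0_) and takes reqInfo
      // either from the plain field or, if one was materialized, from the
      // nested field builder; unlike build(), it skips the isInitialized()
      // check.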

      public Builder mergeFrom(com.google.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto) {
          return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto other) {
        if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto.getDefaultInstance()) return this;
        if (other.hasReqInfo()) {
          mergeReqInfo(other.getReqInfo());
        }
        this.mergeUnknownFields(other.getUnknownFields());
        return this;
      }

      public final boolean isInitialized() {
        if (!hasReqInfo()) {
          return false;
        }
        if (!getReqInfo().isInitialized()) {
          return false;
        }
        return true;
      }

      public Builder mergeFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto parsedMessage = null;
        try {
          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
          parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto) e.getUnfinishedMessage();
          throw e;
        } finally {
          if (parsedMessage != null) {
            mergeFrom(parsedMessage);
          }
        }
        return this;
      }
      private int bitField0_;

      // required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1;
      private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
      private com.google.protobuf.SingleFieldBuilder<
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder> reqInfoBuilder_;
      /**
       * <code>required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1;</code>
       */
      public boolean hasReqInfo() {
        return ((bitField0_ & 0x00000001) == 0x00000001);
      }
      /**
       * <code>required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1;</code>
       */
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo() {
        if (reqInfoBuilder_ == null) {
          return reqInfo_;
        } else {
          return reqInfoBuilder_.getMessage();
        }
      }
      /**
       * <code>required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1;</code>
       */
      public Builder setReqInfo(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto value) {
        if (reqInfoBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          reqInfo_ = value;
          onChanged();
        } else {
          reqInfoBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00000001;
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1;</code>
       */
      public Builder setReqInfo(
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder builderForValue) {
        if (reqInfoBuilder_ == null) {
          reqInfo_ = builderForValue.build();
          onChanged();
        } else {
          reqInfoBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000001;
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1;</code>
       */
      public Builder mergeReqInfo(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto value) {
        if (reqInfoBuilder_ == null) {
          if (((bitField0_ & 0x00000001) == 0x00000001) &&
              reqInfo_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance()) {
            reqInfo_ =
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.newBuilder(reqInfo_).mergeFrom(value).buildPartial();
          } else {
            reqInfo_ = value;
          }
          onChanged();
        } else {
          reqInfoBuilder_.mergeFrom(value);
        }
        bitField0_ |= 0x00000001;
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1;</code>
       */
      public Builder clearReqInfo() {
        if (reqInfoBuilder_ == null) {
          reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
          onChanged();
        } else {
          reqInfoBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000001);
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1;</code>
       */
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder getReqInfoBuilder() {
        bitField0_ |= 0x00000001;
        onChanged();
        return getReqInfoFieldBuilder().getBuilder();
      }
      /**
       * <code>required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1;</code>
       */
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder() {
        if (reqInfoBuilder_ != null) {
          return reqInfoBuilder_.getMessageOrBuilder();
        } else {
          return reqInfo_;
        }
      }
      /**
       * <code>required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1;</code>
       */
      private com.google.protobuf.SingleFieldBuilder<
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder>
          getReqInfoFieldBuilder() {
        if (reqInfoBuilder_ == null) {
          reqInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder<
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder>(
                  reqInfo_,
                  getParentForChildren(),
                  isClean());
          reqInfo_ = null;
        }
        return reqInfoBuilder_;
      }
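      // The reqInfo field lives in one of two places: the plain reqInfo_
      // message until a nested builder is first requested, then the lazily
      // created SingleFieldBuilder (reqInfoBuilder_), at which point reqInfo_
      // is nulled and the field builder becomes the single source of truth.
      // Every accessor above branches on which representation is active.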

      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.qjournal.HeartbeatRequestProto)
    }

    static {
      defaultInstance = new HeartbeatRequestProto(true);
      defaultInstance.initFields();
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.qjournal.HeartbeatRequestProto)
  }

  public interface HeartbeatResponseProtoOrBuilder
      extends com.google.protobuf.MessageOrBuilder {
  }
  /**
   * Protobuf type {@code hadoop.hdfs.qjournal.HeartbeatResponseProto}
   *
   * <pre>
   * void response
   * </pre>
   */
  public static final class HeartbeatResponseProto extends
      com.google.protobuf.GeneratedMessage
      implements HeartbeatResponseProtoOrBuilder {
    // Use HeartbeatResponseProto.newBuilder() to construct.
    private HeartbeatResponseProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
      super(builder);
      this.unknownFields = builder.getUnknownFields();
    }
    private HeartbeatResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }

    private static final HeartbeatResponseProto defaultInstance;
    public static HeartbeatResponseProto getDefaultInstance() {
      return defaultInstance;
    }

    public HeartbeatResponseProto getDefaultInstanceForType() {
      return defaultInstance;
    }

    private final com.google.protobuf.UnknownFieldSet unknownFields;
    @java.lang.Override
    public final com.google.protobuf.UnknownFieldSet
        getUnknownFields() {
      return this.unknownFields;
    }
    private HeartbeatResponseProto(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      initFields();
      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
          com.google.protobuf.UnknownFieldSet.newBuilder();
      try {
        boolean done = false;
        while (!done) {
          int tag = input.readTag();
          switch (tag) {
            case 0:
              done = true;
              break;
            default: {
              if (!parseUnknownField(input, unknownFields,
                                     extensionRegistry, tag)) {
                done = true;
              }
              break;
            }
          }
        }
      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
        throw e.setUnfinishedMessage(this);
      } catch (java.io.IOException e) {
        throw new com.google.protobuf.InvalidProtocolBufferException(
            e.getMessage()).setUnfinishedMessage(this);
      } finally {
        this.unknownFields = unknownFields.build();
        makeExtensionsImmutable();
      }
    }
    public static final com.google.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_HeartbeatResponseProto_descriptor;
    }

    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_HeartbeatResponseProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto.Builder.class);
    }

    public static com.google.protobuf.Parser<HeartbeatResponseProto> PARSER =
        new com.google.protobuf.AbstractParser<HeartbeatResponseProto>() {
      public HeartbeatResponseProto parsePartialFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws com.google.protobuf.InvalidProtocolBufferException {
        return new HeartbeatResponseProto(input, extensionRegistry);
      }
    };

    @java.lang.Override
    public com.google.protobuf.Parser<HeartbeatResponseProto> getParserForType() {
      return PARSER;
    }

    private void initFields() {
    }
    private byte memoizedIsInitialized = -1;
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized != -1) return isInitialized == 1;

      memoizedIsInitialized = 1;
      return true;
    }

    public void writeTo(com.google.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      getSerializedSize();
      getUnknownFields().writeTo(output);
    }

    private int memoizedSerializedSize = -1;
    public int getSerializedSize() {
      int size = memoizedSerializedSize;
      if (size != -1) return size;

      size = 0;
      size += getUnknownFields().getSerializedSize();
      memoizedSerializedSize = size;
      return size;
    }

    private static final long serialVersionUID = 0L;
    @java.lang.Override
    protected java.lang.Object writeReplace()
        throws java.io.ObjectStreamException {
      return super.writeReplace();
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto) obj;

      boolean result = true;
      result = result &&
          getUnknownFields().equals(other.getUnknownFields());
      return result;
    }

    private int memoizedHashCode = 0;
    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptorForType().hashCode();
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto parseFrom(
        com.google.protobuf.ByteString data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto parseFrom(
        com.google.protobuf.ByteString data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto parseFrom(byte[] data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto parseFrom(
        byte[] data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto parseFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto parseDelimitedFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto parseFrom(
        com.google.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto parseFrom(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }
4815    
4816        public static Builder newBuilder() { return Builder.create(); }
4817        public Builder newBuilderForType() { return newBuilder(); }
4818        public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto prototype) {
4819          return newBuilder().mergeFrom(prototype);
4820        }
4821        public Builder toBuilder() { return newBuilder(this); }
4822    
4823        @java.lang.Override
4824        protected Builder newBuilderForType(
4825            com.google.protobuf.GeneratedMessage.BuilderParent parent) {
4826          Builder builder = new Builder(parent);
4827          return builder;
4828        }
4829        /**
4830         * Protobuf type {@code hadoop.hdfs.qjournal.HeartbeatResponseProto}
4831         *
4832         * <pre>
4833         * void response
4834         * </pre>
4835         */
4836        public static final class Builder extends
4837            com.google.protobuf.GeneratedMessage.Builder<Builder>
4838           implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProtoOrBuilder {
4839          public static final com.google.protobuf.Descriptors.Descriptor
4840              getDescriptor() {
4841            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_HeartbeatResponseProto_descriptor;
4842          }
4843    
4844          protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
4845              internalGetFieldAccessorTable() {
4846            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_HeartbeatResponseProto_fieldAccessorTable
4847                .ensureFieldAccessorsInitialized(
4848                    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto.Builder.class);
4849          }
4850    
4851          // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto.newBuilder()
4852          private Builder() {
4853            maybeForceBuilderInitialization();
4854          }
4855    
4856          private Builder(
4857              com.google.protobuf.GeneratedMessage.BuilderParent parent) {
4858            super(parent);
4859            maybeForceBuilderInitialization();
4860          }
4861          private void maybeForceBuilderInitialization() {
4862            if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
4863            }
4864          }
4865          private static Builder create() {
4866            return new Builder();
4867          }
4868    
4869          public Builder clear() {
4870            super.clear();
4871            return this;
4872          }
4873    
4874          public Builder clone() {
4875            return create().mergeFrom(buildPartial());
4876          }
4877    
4878          public com.google.protobuf.Descriptors.Descriptor
4879              getDescriptorForType() {
4880            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_HeartbeatResponseProto_descriptor;
4881          }
4882    
4883          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto getDefaultInstanceForType() {
4884            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto.getDefaultInstance();
4885          }
4886    
4887          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto build() {
4888            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto result = buildPartial();
4889            if (!result.isInitialized()) {
4890              throw newUninitializedMessageException(result);
4891            }
4892            return result;
4893          }
4894    
4895          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto buildPartial() {
4896            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto(this);
4897            onBuilt();
4898            return result;
4899          }
4900    
4901          public Builder mergeFrom(com.google.protobuf.Message other) {
4902            if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto) {
4903              return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto)other);
4904            } else {
4905              super.mergeFrom(other);
4906              return this;
4907            }
4908          }
4909    
4910          public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto other) {
4911            if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto.getDefaultInstance()) return this;
4912            this.mergeUnknownFields(other.getUnknownFields());
4913            return this;
4914          }
4915    
4916          public final boolean isInitialized() {
4917            return true;
4918          }
4919    
4920          public Builder mergeFrom(
4921              com.google.protobuf.CodedInputStream input,
4922              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
4923              throws java.io.IOException {
4924            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto parsedMessage = null;
4925            try {
4926              parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
4927            } catch (com.google.protobuf.InvalidProtocolBufferException e) {
4928              parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto) e.getUnfinishedMessage();
4929              throw e;
4930            } finally {
4931              if (parsedMessage != null) {
4932                mergeFrom(parsedMessage);
4933              }
4934            }
4935            return this;
4936          }
4937    
4938          // @@protoc_insertion_point(builder_scope:hadoop.hdfs.qjournal.HeartbeatResponseProto)
4939        }
4940    
4941        static {
4942          defaultInstance = new HeartbeatResponseProto(true);
4943          defaultInstance.initFields();
4944        }
4945    
4946        // @@protoc_insertion_point(class_scope:hadoop.hdfs.qjournal.HeartbeatResponseProto)
4947      }
4948    
4949      public interface StartLogSegmentRequestProtoOrBuilder
4950          extends com.google.protobuf.MessageOrBuilder {
4951    
4952        // required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1;
4953        /**
4954         * <code>required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1;</code>
4955         */
4956        boolean hasReqInfo();
4957        /**
4958         * <code>required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1;</code>
4959         */
4960        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo();
4961        /**
4962         * <code>required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1;</code>
4963         */
4964        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder();
4965    
4966        // required uint64 txid = 2;
4967        /**
4968         * <code>required uint64 txid = 2;</code>
4969         *
4970         * <pre>
4971         * Transaction ID
4972         * </pre>
4973         */
4974        boolean hasTxid();
4975        /**
4976         * <code>required uint64 txid = 2;</code>
4977         *
4978         * <pre>
4979         * Transaction ID
4980         * </pre>
4981         */
4982        long getTxid();
4983    
4984        // optional sint32 layoutVersion = 3;
4985        /**
4986         * <code>optional sint32 layoutVersion = 3;</code>
4987         *
4988         * <pre>
4989         * the LayoutVersion in the client
4990         * </pre>
4991         */
4992        boolean hasLayoutVersion();
4993        /**
4994         * <code>optional sint32 layoutVersion = 3;</code>
4995         *
4996         * <pre>
4997         * the LayoutVersion in the client
4998         * </pre>
4999         */
5000        int getLayoutVersion();
5001      }
5002      /**
5003       * Protobuf type {@code hadoop.hdfs.qjournal.StartLogSegmentRequestProto}
5004       *
5005       * <pre>
5006       **
5007       * startLogSegment()
5008       * </pre>
5009       */
5010      public static final class StartLogSegmentRequestProto extends
5011          com.google.protobuf.GeneratedMessage
5012          implements StartLogSegmentRequestProtoOrBuilder {
5013        // Use StartLogSegmentRequestProto.newBuilder() to construct.
5014        private StartLogSegmentRequestProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
5015          super(builder);
5016          this.unknownFields = builder.getUnknownFields();
5017        }
5018        private StartLogSegmentRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
5019    
5020        private static final StartLogSegmentRequestProto defaultInstance;
5021        public static StartLogSegmentRequestProto getDefaultInstance() {
5022          return defaultInstance;
5023        }
5024    
5025        public StartLogSegmentRequestProto getDefaultInstanceForType() {
5026          return defaultInstance;
5027        }
5028    
5029        private final com.google.protobuf.UnknownFieldSet unknownFields;
5030        @java.lang.Override
5031        public final com.google.protobuf.UnknownFieldSet
5032            getUnknownFields() {
5033          return this.unknownFields;
5034        }
5035        private StartLogSegmentRequestProto(
5036            com.google.protobuf.CodedInputStream input,
5037            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
5038            throws com.google.protobuf.InvalidProtocolBufferException {
5039          initFields();
5040          int mutable_bitField0_ = 0;
5041          com.google.protobuf.UnknownFieldSet.Builder unknownFields =
5042              com.google.protobuf.UnknownFieldSet.newBuilder();
5043          try {
5044            boolean done = false;
5045            while (!done) {
5046              int tag = input.readTag();
5047              switch (tag) {
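            // Tag = (field_number << 3) | wire_type: reqInfo (field 1,
            // length-delimited) arrives as tag 10, txid (field 2, varint)
            // as tag 16, and layoutVersion (field 3, varint) as tag 24.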
5048                case 0:
5049                  done = true;
5050                  break;
5051                default: {
5052                  if (!parseUnknownField(input, unknownFields,
5053                                         extensionRegistry, tag)) {
5054                    done = true;
5055                  }
5056                  break;
5057                }
5058                case 10: {
5059                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder subBuilder = null;
5060                  if (((bitField0_ & 0x00000001) == 0x00000001)) {
5061                    subBuilder = reqInfo_.toBuilder();
5062                  }
5063                  reqInfo_ = input.readMessage(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.PARSER, extensionRegistry);
5064                  if (subBuilder != null) {
5065                    subBuilder.mergeFrom(reqInfo_);
5066                    reqInfo_ = subBuilder.buildPartial();
5067                  }
5068                  bitField0_ |= 0x00000001;
5069                  break;
5070                }
5071                case 16: {
5072                  bitField0_ |= 0x00000002;
5073                  txid_ = input.readUInt64();
5074                  break;
5075                }
5076                case 24: {
5077                  bitField0_ |= 0x00000004;
5078                  layoutVersion_ = input.readSInt32();
5079                  break;
5080                }
5081              }
5082            }
5083          } catch (com.google.protobuf.InvalidProtocolBufferException e) {
5084            throw e.setUnfinishedMessage(this);
5085          } catch (java.io.IOException e) {
5086            throw new com.google.protobuf.InvalidProtocolBufferException(
5087                e.getMessage()).setUnfinishedMessage(this);
5088          } finally {
5089            this.unknownFields = unknownFields.build();
5090            makeExtensionsImmutable();
5091          }
5092        }
5093        public static final com.google.protobuf.Descriptors.Descriptor
5094            getDescriptor() {
5095          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_StartLogSegmentRequestProto_descriptor;
5096        }
5097    
5098        protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
5099            internalGetFieldAccessorTable() {
5100          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_StartLogSegmentRequestProto_fieldAccessorTable
5101              .ensureFieldAccessorsInitialized(
5102                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto.Builder.class);
5103        }
5104    
5105        public static com.google.protobuf.Parser<StartLogSegmentRequestProto> PARSER =
5106            new com.google.protobuf.AbstractParser<StartLogSegmentRequestProto>() {
5107          public StartLogSegmentRequestProto parsePartialFrom(
5108              com.google.protobuf.CodedInputStream input,
5109              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
5110              throws com.google.protobuf.InvalidProtocolBufferException {
5111            return new StartLogSegmentRequestProto(input, extensionRegistry);
5112          }
5113        };
5114    
5115        @java.lang.Override
5116        public com.google.protobuf.Parser<StartLogSegmentRequestProto> getParserForType() {
5117          return PARSER;
5118        }
5119    
5120        private int bitField0_;
5121        // required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1;
5122        public static final int REQINFO_FIELD_NUMBER = 1;
5123        private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto reqInfo_;
5124        /**
5125         * <code>required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1;</code>
5126         */
5127        public boolean hasReqInfo() {
5128          return ((bitField0_ & 0x00000001) == 0x00000001);
5129        }
5130        /**
5131         * <code>required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1;</code>
5132         */
5133        public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo() {
5134          return reqInfo_;
5135        }
5136        /**
5137         * <code>required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1;</code>
5138         */
5139        public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder() {
5140          return reqInfo_;
5141        }
5142    
5143        // required uint64 txid = 2;
5144        public static final int TXID_FIELD_NUMBER = 2;
5145        private long txid_;
5146        /**
5147         * <code>required uint64 txid = 2;</code>
5148         *
5149         * <pre>
5150         * Transaction ID
5151         * </pre>
5152         */
5153        public boolean hasTxid() {
5154          return ((bitField0_ & 0x00000002) == 0x00000002);
5155        }
5156        /**
5157         * <code>required uint64 txid = 2;</code>
5158         *
5159         * <pre>
5160         * Transaction ID
5161         * </pre>
5162         */
5163        public long getTxid() {
5164          return txid_;
5165        }
5166    
5167        // optional sint32 layoutVersion = 3;
5168        public static final int LAYOUTVERSION_FIELD_NUMBER = 3;
5169        private int layoutVersion_;
5170        /**
5171         * <code>optional sint32 layoutVersion = 3;</code>
5172         *
5173         * <pre>
5174         * the LayoutVersion in the client
5175         * </pre>
5176         */
5177        public boolean hasLayoutVersion() {
5178          return ((bitField0_ & 0x00000004) == 0x00000004);
5179        }
5180        /**
5181         * <code>optional sint32 layoutVersion = 3;</code>
5182         *
5183         * <pre>
5184         * the LayoutVersion in the client
5185         * </pre>
5186         */
5187        public int getLayoutVersion() {
5188          return layoutVersion_;
5189        }
5190    
5191        private void initFields() {
5192          reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
5193          txid_ = 0L;
5194          layoutVersion_ = 0;
5195        }
5196        private byte memoizedIsInitialized = -1;
5197        public final boolean isInitialized() {
5198          byte isInitialized = memoizedIsInitialized;
5199          if (isInitialized != -1) return isInitialized == 1;
5200    
5201          if (!hasReqInfo()) {
5202            memoizedIsInitialized = 0;
5203            return false;
5204          }
5205          if (!hasTxid()) {
5206            memoizedIsInitialized = 0;
5207            return false;
5208          }
5209          if (!getReqInfo().isInitialized()) {
5210            memoizedIsInitialized = 0;
5211            return false;
5212          }
5213          memoizedIsInitialized = 1;
5214          return true;
5215        }
5216    
5217        public void writeTo(com.google.protobuf.CodedOutputStream output)
5218                            throws java.io.IOException {
5219          getSerializedSize();
5220          if (((bitField0_ & 0x00000001) == 0x00000001)) {
5221            output.writeMessage(1, reqInfo_);
5222          }
5223          if (((bitField0_ & 0x00000002) == 0x00000002)) {
5224            output.writeUInt64(2, txid_);
5225          }
5226          if (((bitField0_ & 0x00000004) == 0x00000004)) {
5227            output.writeSInt32(3, layoutVersion_);
5228          }
5229          getUnknownFields().writeTo(output);
5230        }
5231    
5232        private int memoizedSerializedSize = -1;
5233        public int getSerializedSize() {
5234          int size = memoizedSerializedSize;
5235          if (size != -1) return size;
5236    
5237          size = 0;
5238          if (((bitField0_ & 0x00000001) == 0x00000001)) {
5239            size += com.google.protobuf.CodedOutputStream
5240              .computeMessageSize(1, reqInfo_);
5241          }
5242          if (((bitField0_ & 0x00000002) == 0x00000002)) {
5243            size += com.google.protobuf.CodedOutputStream
5244              .computeUInt64Size(2, txid_);
5245          }
5246          if (((bitField0_ & 0x00000004) == 0x00000004)) {
5247            size += com.google.protobuf.CodedOutputStream
5248              .computeSInt32Size(3, layoutVersion_);
5249          }
5250          size += getUnknownFields().getSerializedSize();
5251          memoizedSerializedSize = size;
5252          return size;
5253        }
5254    
5255        private static final long serialVersionUID = 0L;
5256        @java.lang.Override
5257        protected java.lang.Object writeReplace()
5258            throws java.io.ObjectStreamException {
5259          return super.writeReplace();
5260        }
5261    
5262        @java.lang.Override
5263        public boolean equals(final java.lang.Object obj) {
5264          if (obj == this) {
        return true;
5266          }
5267          if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto)) {
5268            return super.equals(obj);
5269          }
5270          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto) obj;
5271    
5272          boolean result = true;
5273          result = result && (hasReqInfo() == other.hasReqInfo());
5274          if (hasReqInfo()) {
5275            result = result && getReqInfo()
5276                .equals(other.getReqInfo());
5277          }
5278          result = result && (hasTxid() == other.hasTxid());
5279          if (hasTxid()) {
5280            result = result && (getTxid()
5281                == other.getTxid());
5282          }
5283          result = result && (hasLayoutVersion() == other.hasLayoutVersion());
5284          if (hasLayoutVersion()) {
5285            result = result && (getLayoutVersion()
5286                == other.getLayoutVersion());
5287          }
5288          result = result &&
5289              getUnknownFields().equals(other.getUnknownFields());
5290          return result;
5291        }
5292    
5293        private int memoizedHashCode = 0;
5294        @java.lang.Override
5295        public int hashCode() {
5296          if (memoizedHashCode != 0) {
5297            return memoizedHashCode;
5298          }
5299          int hash = 41;
5300          hash = (19 * hash) + getDescriptorForType().hashCode();
5301          if (hasReqInfo()) {
5302            hash = (37 * hash) + REQINFO_FIELD_NUMBER;
5303            hash = (53 * hash) + getReqInfo().hashCode();
5304          }
5305          if (hasTxid()) {
5306            hash = (37 * hash) + TXID_FIELD_NUMBER;
5307            hash = (53 * hash) + hashLong(getTxid());
5308          }
5309          if (hasLayoutVersion()) {
5310            hash = (37 * hash) + LAYOUTVERSION_FIELD_NUMBER;
5311            hash = (53 * hash) + getLayoutVersion();
5312          }
5313          hash = (29 * hash) + getUnknownFields().hashCode();
5314          memoizedHashCode = hash;
5315          return hash;
5316        }
5317    
5318        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto parseFrom(
5319            com.google.protobuf.ByteString data)
5320            throws com.google.protobuf.InvalidProtocolBufferException {
5321          return PARSER.parseFrom(data);
5322        }
5323        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto parseFrom(
5324            com.google.protobuf.ByteString data,
5325            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
5326            throws com.google.protobuf.InvalidProtocolBufferException {
5327          return PARSER.parseFrom(data, extensionRegistry);
5328        }
5329        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto parseFrom(byte[] data)
5330            throws com.google.protobuf.InvalidProtocolBufferException {
5331          return PARSER.parseFrom(data);
5332        }
5333        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto parseFrom(
5334            byte[] data,
5335            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
5336            throws com.google.protobuf.InvalidProtocolBufferException {
5337          return PARSER.parseFrom(data, extensionRegistry);
5338        }
5339        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto parseFrom(java.io.InputStream input)
5340            throws java.io.IOException {
5341          return PARSER.parseFrom(input);
5342        }
5343        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto parseFrom(
5344            java.io.InputStream input,
5345            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
5346            throws java.io.IOException {
5347          return PARSER.parseFrom(input, extensionRegistry);
5348        }
5349        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto parseDelimitedFrom(java.io.InputStream input)
5350            throws java.io.IOException {
5351          return PARSER.parseDelimitedFrom(input);
5352        }
5353        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto parseDelimitedFrom(
5354            java.io.InputStream input,
5355            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
5356            throws java.io.IOException {
5357          return PARSER.parseDelimitedFrom(input, extensionRegistry);
5358        }
5359        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto parseFrom(
5360            com.google.protobuf.CodedInputStream input)
5361            throws java.io.IOException {
5362          return PARSER.parseFrom(input);
5363        }
5364        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto parseFrom(
5365            com.google.protobuf.CodedInputStream input,
5366            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
5367            throws java.io.IOException {
5368          return PARSER.parseFrom(input, extensionRegistry);
5369        }
5370    
5371        public static Builder newBuilder() { return Builder.create(); }
5372        public Builder newBuilderForType() { return newBuilder(); }
5373        public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto prototype) {
5374          return newBuilder().mergeFrom(prototype);
5375        }
5376        public Builder toBuilder() { return newBuilder(this); }
5377    
5378        @java.lang.Override
5379        protected Builder newBuilderForType(
5380            com.google.protobuf.GeneratedMessage.BuilderParent parent) {
5381          Builder builder = new Builder(parent);
5382          return builder;
5383        }
5384        /**
5385         * Protobuf type {@code hadoop.hdfs.qjournal.StartLogSegmentRequestProto}
5386         *
5387         * <pre>
5388         **
5389         * startLogSegment()
5390         * </pre>
5391         */
5392        public static final class Builder extends
5393            com.google.protobuf.GeneratedMessage.Builder<Builder>
        implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProtoOrBuilder {
5395          public static final com.google.protobuf.Descriptors.Descriptor
5396              getDescriptor() {
5397            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_StartLogSegmentRequestProto_descriptor;
5398          }
5399    
5400          protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
5401              internalGetFieldAccessorTable() {
5402            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_StartLogSegmentRequestProto_fieldAccessorTable
5403                .ensureFieldAccessorsInitialized(
5404                    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto.Builder.class);
5405          }
5406    
5407          // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto.newBuilder()
5408          private Builder() {
5409            maybeForceBuilderInitialization();
5410          }
5411    
5412          private Builder(
5413              com.google.protobuf.GeneratedMessage.BuilderParent parent) {
5414            super(parent);
5415            maybeForceBuilderInitialization();
5416          }
5417          private void maybeForceBuilderInitialization() {
5418            if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
5419              getReqInfoFieldBuilder();
5420            }
5421          }
5422          private static Builder create() {
5423            return new Builder();
5424          }
5425    
5426          public Builder clear() {
5427            super.clear();
5428            if (reqInfoBuilder_ == null) {
5429              reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
5430            } else {
5431              reqInfoBuilder_.clear();
5432            }
5433            bitField0_ = (bitField0_ & ~0x00000001);
5434            txid_ = 0L;
5435            bitField0_ = (bitField0_ & ~0x00000002);
5436            layoutVersion_ = 0;
5437            bitField0_ = (bitField0_ & ~0x00000004);
5438            return this;
5439          }
5440    
5441          public Builder clone() {
5442            return create().mergeFrom(buildPartial());
5443          }
5444    
5445          public com.google.protobuf.Descriptors.Descriptor
5446              getDescriptorForType() {
5447            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_StartLogSegmentRequestProto_descriptor;
5448          }
5449    
5450          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto getDefaultInstanceForType() {
5451            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto.getDefaultInstance();
5452          }
5453    
5454          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto build() {
5455            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto result = buildPartial();
5456            if (!result.isInitialized()) {
5457              throw newUninitializedMessageException(result);
5458            }
5459            return result;
5460          }
5461    
5462          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto buildPartial() {
5463            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto(this);
5464            int from_bitField0_ = bitField0_;
5465            int to_bitField0_ = 0;
5466            if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
5467              to_bitField0_ |= 0x00000001;
5468            }
5469            if (reqInfoBuilder_ == null) {
5470              result.reqInfo_ = reqInfo_;
5471            } else {
5472              result.reqInfo_ = reqInfoBuilder_.build();
5473            }
5474            if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
5475              to_bitField0_ |= 0x00000002;
5476            }
5477            result.txid_ = txid_;
5478            if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
5479              to_bitField0_ |= 0x00000004;
5480            }
5481            result.layoutVersion_ = layoutVersion_;
5482            result.bitField0_ = to_bitField0_;
5483            onBuilt();
5484            return result;
5485          }
5486    
5487          public Builder mergeFrom(com.google.protobuf.Message other) {
5488            if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto) {
5489              return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto)other);
5490            } else {
5491              super.mergeFrom(other);
5492              return this;
5493            }
5494          }
5495    
5496          public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto other) {
5497            if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto.getDefaultInstance()) return this;
5498            if (other.hasReqInfo()) {
5499              mergeReqInfo(other.getReqInfo());
5500            }
5501            if (other.hasTxid()) {
5502              setTxid(other.getTxid());
5503            }
5504            if (other.hasLayoutVersion()) {
5505              setLayoutVersion(other.getLayoutVersion());
5506            }
5507            this.mergeUnknownFields(other.getUnknownFields());
5508            return this;
5509          }
5510    
      public final boolean isInitialized() {
        if (!hasReqInfo()) {
          return false;
        }
        if (!hasTxid()) {
          return false;
        }
        if (!getReqInfo().isInitialized()) {
          return false;
        }
        return true;
      }
5526    
5527          public Builder mergeFrom(
5528              com.google.protobuf.CodedInputStream input,
5529              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
5530              throws java.io.IOException {
5531            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto parsedMessage = null;
5532            try {
5533              parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
5534            } catch (com.google.protobuf.InvalidProtocolBufferException e) {
5535              parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto) e.getUnfinishedMessage();
5536              throw e;
5537            } finally {
5538              if (parsedMessage != null) {
5539                mergeFrom(parsedMessage);
5540              }
5541            }
5542            return this;
5543          }
5544          private int bitField0_;
5545    
5546          // required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1;
5547          private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
5548          private com.google.protobuf.SingleFieldBuilder<
5549              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder> reqInfoBuilder_;
5550          /**
5551           * <code>required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1;</code>
5552           */
5553          public boolean hasReqInfo() {
5554            return ((bitField0_ & 0x00000001) == 0x00000001);
5555          }
5556          /**
5557           * <code>required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1;</code>
5558           */
5559          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo() {
5560            if (reqInfoBuilder_ == null) {
5561              return reqInfo_;
5562            } else {
5563              return reqInfoBuilder_.getMessage();
5564            }
5565          }
5566          /**
5567           * <code>required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1;</code>
5568           */
5569          public Builder setReqInfo(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto value) {
5570            if (reqInfoBuilder_ == null) {
5571              if (value == null) {
5572                throw new NullPointerException();
5573              }
5574              reqInfo_ = value;
5575              onChanged();
5576            } else {
5577              reqInfoBuilder_.setMessage(value);
5578            }
5579            bitField0_ |= 0x00000001;
5580            return this;
5581          }
5582          /**
5583           * <code>required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1;</code>
5584           */
5585          public Builder setReqInfo(
5586              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder builderForValue) {
5587            if (reqInfoBuilder_ == null) {
5588              reqInfo_ = builderForValue.build();
5589              onChanged();
5590            } else {
5591              reqInfoBuilder_.setMessage(builderForValue.build());
5592            }
5593            bitField0_ |= 0x00000001;
5594            return this;
5595          }
5596          /**
5597           * <code>required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1;</code>
5598           */
5599          public Builder mergeReqInfo(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto value) {
5600            if (reqInfoBuilder_ == null) {
5601              if (((bitField0_ & 0x00000001) == 0x00000001) &&
5602                  reqInfo_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance()) {
5603                reqInfo_ =
5604                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.newBuilder(reqInfo_).mergeFrom(value).buildPartial();
5605              } else {
5606                reqInfo_ = value;
5607              }
5608              onChanged();
5609            } else {
5610              reqInfoBuilder_.mergeFrom(value);
5611            }
5612            bitField0_ |= 0x00000001;
5613            return this;
5614          }
5615          /**
5616           * <code>required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1;</code>
5617           */
5618          public Builder clearReqInfo() {
5619            if (reqInfoBuilder_ == null) {
5620              reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
5621              onChanged();
5622            } else {
5623              reqInfoBuilder_.clear();
5624            }
5625            bitField0_ = (bitField0_ & ~0x00000001);
5626            return this;
5627          }
5628          /**
5629           * <code>required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1;</code>
5630           */
5631          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder getReqInfoBuilder() {
5632            bitField0_ |= 0x00000001;
5633            onChanged();
5634            return getReqInfoFieldBuilder().getBuilder();
5635          }
5636          /**
5637           * <code>required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1;</code>
5638           */
5639          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder() {
5640            if (reqInfoBuilder_ != null) {
5641              return reqInfoBuilder_.getMessageOrBuilder();
5642            } else {
5643              return reqInfo_;
5644            }
5645          }
5646          /**
5647           * <code>required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1;</code>
5648           */
5649          private com.google.protobuf.SingleFieldBuilder<
5650              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder> 
5651              getReqInfoFieldBuilder() {
5652            if (reqInfoBuilder_ == null) {
5653              reqInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder<
5654                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder>(
5655                      reqInfo_,
5656                      getParentForChildren(),
5657                      isClean());
5658              reqInfo_ = null;
5659            }
5660            return reqInfoBuilder_;
5661          }
5662    
5663          // required uint64 txid = 2;
      private long txid_;
5665          /**
5666           * <code>required uint64 txid = 2;</code>
5667           *
5668           * <pre>
5669           * Transaction ID
5670           * </pre>
5671           */
5672          public boolean hasTxid() {
5673            return ((bitField0_ & 0x00000002) == 0x00000002);
5674          }
5675          /**
5676           * <code>required uint64 txid = 2;</code>
5677           *
5678           * <pre>
5679           * Transaction ID
5680           * </pre>
5681           */
5682          public long getTxid() {
5683            return txid_;
5684          }
5685          /**
5686           * <code>required uint64 txid = 2;</code>
5687           *
5688           * <pre>
5689           * Transaction ID
5690           * </pre>
5691           */
5692          public Builder setTxid(long value) {
5693            bitField0_ |= 0x00000002;
5694            txid_ = value;
5695            onChanged();
5696            return this;
5697          }
5698          /**
5699           * <code>required uint64 txid = 2;</code>
5700           *
5701           * <pre>
5702           * Transaction ID
5703           * </pre>
5704           */
5705          public Builder clearTxid() {
5706            bitField0_ = (bitField0_ & ~0x00000002);
5707            txid_ = 0L;
5708            onChanged();
5709            return this;
5710          }
5711    
5712          // optional sint32 layoutVersion = 3;
      private int layoutVersion_;
5714          /**
5715           * <code>optional sint32 layoutVersion = 3;</code>
5716           *
5717           * <pre>
5718           * the LayoutVersion in the client
5719           * </pre>
5720           */
5721          public boolean hasLayoutVersion() {
5722            return ((bitField0_ & 0x00000004) == 0x00000004);
5723          }
5724          /**
5725           * <code>optional sint32 layoutVersion = 3;</code>
5726           *
5727           * <pre>
5728           * the LayoutVersion in the client
5729           * </pre>
5730           */
5731          public int getLayoutVersion() {
5732            return layoutVersion_;
5733          }
5734          /**
5735           * <code>optional sint32 layoutVersion = 3;</code>
5736           *
5737           * <pre>
5738           * the LayoutVersion in the client
5739           * </pre>
5740           */
5741          public Builder setLayoutVersion(int value) {
5742            bitField0_ |= 0x00000004;
5743            layoutVersion_ = value;
5744            onChanged();
5745            return this;
5746          }
5747          /**
5748           * <code>optional sint32 layoutVersion = 3;</code>
5749           *
5750           * <pre>
5751           * the LayoutVersion in the client
5752           * </pre>
5753           */
5754          public Builder clearLayoutVersion() {
5755            bitField0_ = (bitField0_ & ~0x00000004);
5756            layoutVersion_ = 0;
5757            onChanged();
5758            return this;
5759          }
5760    
5761          // @@protoc_insertion_point(builder_scope:hadoop.hdfs.qjournal.StartLogSegmentRequestProto)
5762        }
5763    
5764        static {
5765          defaultInstance = new StartLogSegmentRequestProto(true);
5766          defaultInstance.initFields();
5767        }
5768    
5769        // @@protoc_insertion_point(class_scope:hadoop.hdfs.qjournal.StartLogSegmentRequestProto)
5770      }
5771    
5772      public interface StartLogSegmentResponseProtoOrBuilder
5773          extends com.google.protobuf.MessageOrBuilder {
5774      }
5775      /**
5776       * Protobuf type {@code hadoop.hdfs.qjournal.StartLogSegmentResponseProto}
5777       */
5778      public static final class StartLogSegmentResponseProto extends
5779          com.google.protobuf.GeneratedMessage
5780          implements StartLogSegmentResponseProtoOrBuilder {
5781        // Use StartLogSegmentResponseProto.newBuilder() to construct.
5782        private StartLogSegmentResponseProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
5783          super(builder);
5784          this.unknownFields = builder.getUnknownFields();
5785        }
5786        private StartLogSegmentResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
5787    
5788        private static final StartLogSegmentResponseProto defaultInstance;
5789        public static StartLogSegmentResponseProto getDefaultInstance() {
5790          return defaultInstance;
5791        }
5792    
5793        public StartLogSegmentResponseProto getDefaultInstanceForType() {
5794          return defaultInstance;
5795        }
5796    
5797        private final com.google.protobuf.UnknownFieldSet unknownFields;
5798        @java.lang.Override
5799        public final com.google.protobuf.UnknownFieldSet
5800            getUnknownFields() {
5801          return this.unknownFields;
5802        }
5803        private StartLogSegmentResponseProto(
5804            com.google.protobuf.CodedInputStream input,
5805            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
5806            throws com.google.protobuf.InvalidProtocolBufferException {
5807          initFields();
5808          com.google.protobuf.UnknownFieldSet.Builder unknownFields =
5809              com.google.protobuf.UnknownFieldSet.newBuilder();
5810          try {
5811            boolean done = false;
5812            while (!done) {
5813              int tag = input.readTag();
5814              switch (tag) {
5815                case 0:
5816                  done = true;
5817                  break;
5818                default: {
5819                  if (!parseUnknownField(input, unknownFields,
5820                                         extensionRegistry, tag)) {
5821                    done = true;
5822                  }
5823                  break;
5824                }
5825              }
5826            }
5827          } catch (com.google.protobuf.InvalidProtocolBufferException e) {
5828            throw e.setUnfinishedMessage(this);
5829          } catch (java.io.IOException e) {
5830            throw new com.google.protobuf.InvalidProtocolBufferException(
5831                e.getMessage()).setUnfinishedMessage(this);
5832          } finally {
5833            this.unknownFields = unknownFields.build();
5834            makeExtensionsImmutable();
5835          }
5836        }
5837        public static final com.google.protobuf.Descriptors.Descriptor
5838            getDescriptor() {
5839          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_StartLogSegmentResponseProto_descriptor;
5840        }
5841    
5842        protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
5843            internalGetFieldAccessorTable() {
5844          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_StartLogSegmentResponseProto_fieldAccessorTable
5845              .ensureFieldAccessorsInitialized(
5846                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto.Builder.class);
5847        }
5848    
5849        public static com.google.protobuf.Parser<StartLogSegmentResponseProto> PARSER =
5850            new com.google.protobuf.AbstractParser<StartLogSegmentResponseProto>() {
5851          public StartLogSegmentResponseProto parsePartialFrom(
5852              com.google.protobuf.CodedInputStream input,
5853              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
5854              throws com.google.protobuf.InvalidProtocolBufferException {
5855            return new StartLogSegmentResponseProto(input, extensionRegistry);
5856          }
5857        };
5858    
5859        @java.lang.Override
5860        public com.google.protobuf.Parser<StartLogSegmentResponseProto> getParserForType() {
5861          return PARSER;
5862        }
5863    
5864        private void initFields() {
5865        }
5866        private byte memoizedIsInitialized = -1;
5867        public final boolean isInitialized() {
5868          byte isInitialized = memoizedIsInitialized;
5869          if (isInitialized != -1) return isInitialized == 1;
5870    
5871          memoizedIsInitialized = 1;
5872          return true;
5873        }
5874    
5875        public void writeTo(com.google.protobuf.CodedOutputStream output)
5876                            throws java.io.IOException {
5877          getSerializedSize();
5878          getUnknownFields().writeTo(output);
5879        }
5880    
5881        private int memoizedSerializedSize = -1;
5882        public int getSerializedSize() {
5883          int size = memoizedSerializedSize;
5884          if (size != -1) return size;
5885    
5886          size = 0;
5887          size += getUnknownFields().getSerializedSize();
5888          memoizedSerializedSize = size;
5889          return size;
5890        }
5891    
5892        private static final long serialVersionUID = 0L;
5893        @java.lang.Override
5894        protected java.lang.Object writeReplace()
5895            throws java.io.ObjectStreamException {
5896          return super.writeReplace();
5897        }
5898    
5899        @java.lang.Override
5900        public boolean equals(final java.lang.Object obj) {
5901          if (obj == this) {
        return true;
5903          }
5904          if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto)) {
5905            return super.equals(obj);
5906          }
5907          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto) obj;
5908    
5909          boolean result = true;
5910          result = result &&
5911              getUnknownFields().equals(other.getUnknownFields());
5912          return result;
5913        }
5914    
5915        private int memoizedHashCode = 0;
5916        @java.lang.Override
5917        public int hashCode() {
5918          if (memoizedHashCode != 0) {
5919            return memoizedHashCode;
5920          }
5921          int hash = 41;
5922          hash = (19 * hash) + getDescriptorForType().hashCode();
5923          hash = (29 * hash) + getUnknownFields().hashCode();
5924          memoizedHashCode = hash;
5925          return hash;
5926        }
5927    
5928        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto parseFrom(
5929            com.google.protobuf.ByteString data)
5930            throws com.google.protobuf.InvalidProtocolBufferException {
5931          return PARSER.parseFrom(data);
5932        }
5933        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto parseFrom(
5934            com.google.protobuf.ByteString data,
5935            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
5936            throws com.google.protobuf.InvalidProtocolBufferException {
5937          return PARSER.parseFrom(data, extensionRegistry);
5938        }
5939        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto parseFrom(byte[] data)
5940            throws com.google.protobuf.InvalidProtocolBufferException {
5941          return PARSER.parseFrom(data);
5942        }
5943        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto parseFrom(
5944            byte[] data,
5945            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
5946            throws com.google.protobuf.InvalidProtocolBufferException {
5947          return PARSER.parseFrom(data, extensionRegistry);
5948        }
5949        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto parseFrom(java.io.InputStream input)
5950            throws java.io.IOException {
5951          return PARSER.parseFrom(input);
5952        }
5953        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto parseFrom(
5954            java.io.InputStream input,
5955            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
5956            throws java.io.IOException {
5957          return PARSER.parseFrom(input, extensionRegistry);
5958        }
5959        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto parseDelimitedFrom(java.io.InputStream input)
5960            throws java.io.IOException {
5961          return PARSER.parseDelimitedFrom(input);
5962        }
5963        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto parseDelimitedFrom(
5964            java.io.InputStream input,
5965            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
5966            throws java.io.IOException {
5967          return PARSER.parseDelimitedFrom(input, extensionRegistry);
5968        }
5969        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto parseFrom(
5970            com.google.protobuf.CodedInputStream input)
5971            throws java.io.IOException {
5972          return PARSER.parseFrom(input);
5973        }
5974        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto parseFrom(
5975            com.google.protobuf.CodedInputStream input,
5976            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
5977            throws java.io.IOException {
5978          return PARSER.parseFrom(input, extensionRegistry);
5979        }
5980    
5981        public static Builder newBuilder() { return Builder.create(); }
5982        public Builder newBuilderForType() { return newBuilder(); }
5983        public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto prototype) {
5984          return newBuilder().mergeFrom(prototype);
5985        }
5986        public Builder toBuilder() { return newBuilder(this); }
5987    
5988        @java.lang.Override
5989        protected Builder newBuilderForType(
5990            com.google.protobuf.GeneratedMessage.BuilderParent parent) {
5991          Builder builder = new Builder(parent);
5992          return builder;
5993        }
5994        /**
5995         * Protobuf type {@code hadoop.hdfs.qjournal.StartLogSegmentResponseProto}
5996         */
5997        public static final class Builder extends
5998            com.google.protobuf.GeneratedMessage.Builder<Builder>
        implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProtoOrBuilder {
6000          public static final com.google.protobuf.Descriptors.Descriptor
6001              getDescriptor() {
6002            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_StartLogSegmentResponseProto_descriptor;
6003          }
6004    
6005          protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
6006              internalGetFieldAccessorTable() {
6007            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_StartLogSegmentResponseProto_fieldAccessorTable
6008                .ensureFieldAccessorsInitialized(
6009                    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto.Builder.class);
6010          }
6011    
6012          // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto.newBuilder()
6013          private Builder() {
6014            maybeForceBuilderInitialization();
6015          }
6016    
6017          private Builder(
6018              com.google.protobuf.GeneratedMessage.BuilderParent parent) {
6019            super(parent);
6020            maybeForceBuilderInitialization();
6021          }
6022          private void maybeForceBuilderInitialization() {
6023            if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
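            // no message-typed fields here, so there are no field builders to pre-create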
6024            }
6025          }
6026          private static Builder create() {
6027            return new Builder();
6028          }
6029    
6030          public Builder clear() {
6031            super.clear();
6032            return this;
6033          }
6034    
6035          public Builder clone() {
6036            return create().mergeFrom(buildPartial());
6037          }
6038    
6039          public com.google.protobuf.Descriptors.Descriptor
6040              getDescriptorForType() {
6041            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_StartLogSegmentResponseProto_descriptor;
6042          }
6043    
6044          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto getDefaultInstanceForType() {
6045            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto.getDefaultInstance();
6046          }
6047    
6048          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto build() {
6049            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto result = buildPartial();
6050            if (!result.isInitialized()) {
6051              throw newUninitializedMessageException(result);
6052            }
6053            return result;
6054          }
6055    
6056          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto buildPartial() {
6057            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto(this);
6058            onBuilt();
6059            return result;
6060          }
6061    
6062          public Builder mergeFrom(com.google.protobuf.Message other) {
6063            if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto) {
6064              return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto)other);
6065            } else {
6066              super.mergeFrom(other);
6067              return this;
6068            }
6069          }
6070    
6071          public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto other) {
6072            if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto.getDefaultInstance()) return this;
6073            this.mergeUnknownFields(other.getUnknownFields());
6074            return this;
6075          }
6076    
6077          public final boolean isInitialized() {
6078            return true;
6079          }
6080    
6081          public Builder mergeFrom(
6082              com.google.protobuf.CodedInputStream input,
6083              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
6084              throws java.io.IOException {
6085            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto parsedMessage = null;
6086            try {
6087              parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
6088            } catch (com.google.protobuf.InvalidProtocolBufferException e) {
6089              parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto) e.getUnfinishedMessage();
6090              throw e;
6091            } finally {
6092              if (parsedMessage != null) {
6093                mergeFrom(parsedMessage);
6094              }
6095            }
6096            return this;
6097          }
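
              // Note (hand-written, not protoc output): the finally block above
              // folds any partially parsed message back into this builder, so
              // fields that were decoded before a parse failure are not lost.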
6098    
6099          // @@protoc_insertion_point(builder_scope:hadoop.hdfs.qjournal.StartLogSegmentResponseProto)
6100        }
6101    
6102        static {
6103          defaultInstance = new StartLogSegmentResponseProto(true);
6104          defaultInstance.initFields();
6105        }
6106    
6107        // @@protoc_insertion_point(class_scope:hadoop.hdfs.qjournal.StartLogSegmentResponseProto)
6108      }
6109    
6110      public interface FinalizeLogSegmentRequestProtoOrBuilder
6111          extends com.google.protobuf.MessageOrBuilder {
6112    
6113        // required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1;
6114        /**
6115         * <code>required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1;</code>
6116         */
6117        boolean hasReqInfo();
6118        /**
6119         * <code>required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1;</code>
6120         */
6121        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo();
6122        /**
6123         * <code>required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1;</code>
6124         */
6125        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder();
6126    
6127        // required uint64 startTxId = 2;
6128        /**
6129         * <code>required uint64 startTxId = 2;</code>
6130         */
6131        boolean hasStartTxId();
6132        /**
6133         * <code>required uint64 startTxId = 2;</code>
6134         */
6135        long getStartTxId();
6136    
6137        // required uint64 endTxId = 3;
6138        /**
6139         * <code>required uint64 endTxId = 3;</code>
6140         */
6141        boolean hasEndTxId();
6142        /**
6143         * <code>required uint64 endTxId = 3;</code>
6144         */
6145        long getEndTxId();
6146      }
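
          // Hand-written reading sketch (not protoc output): required fields
          // offer no compile-time guarantee, so callers typically guard the
          // get*() accessors with their has*() twins. segmentLength() is a
          // hypothetical helper using only methods declared in the interface
          // above; txids are treated as inclusive on both ends.
          private static long segmentLength(FinalizeLogSegmentRequestProtoOrBuilder req) {
            if (!req.hasStartTxId() || !req.hasEndTxId()) {
              return -1L;  // incomplete message: length is undefined
            }
            return req.getEndTxId() - req.getStartTxId() + 1;
          }
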
6147      /**
6148       * Protobuf type {@code hadoop.hdfs.qjournal.FinalizeLogSegmentRequestProto}
6149       *
6150       * <pre>
6151       **
6152       * finalizeLogSegment()
6153       * </pre>
6154       */
6155      public static final class FinalizeLogSegmentRequestProto extends
6156          com.google.protobuf.GeneratedMessage
6157          implements FinalizeLogSegmentRequestProtoOrBuilder {
6158        // Use FinalizeLogSegmentRequestProto.newBuilder() to construct.
6159        private FinalizeLogSegmentRequestProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
6160          super(builder);
6161          this.unknownFields = builder.getUnknownFields();
6162        }
6163        private FinalizeLogSegmentRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
6164    
6165        private static final FinalizeLogSegmentRequestProto defaultInstance;
6166        public static FinalizeLogSegmentRequestProto getDefaultInstance() {
6167          return defaultInstance;
6168        }
6169    
6170        public FinalizeLogSegmentRequestProto getDefaultInstanceForType() {
6171          return defaultInstance;
6172        }
6173    
6174        private final com.google.protobuf.UnknownFieldSet unknownFields;
6175        @java.lang.Override
6176        public final com.google.protobuf.UnknownFieldSet
6177            getUnknownFields() {
6178          return this.unknownFields;
6179        }
6180        private FinalizeLogSegmentRequestProto(
6181            com.google.protobuf.CodedInputStream input,
6182            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
6183            throws com.google.protobuf.InvalidProtocolBufferException {
6184          initFields();
6185          int mutable_bitField0_ = 0;
6186          com.google.protobuf.UnknownFieldSet.Builder unknownFields =
6187              com.google.protobuf.UnknownFieldSet.newBuilder();
6188          try {
6189            boolean done = false;
6190            while (!done) {
6191              int tag = input.readTag();
6192              switch (tag) {
6193                case 0:
6194                  done = true;
6195                  break;
6196                default: {
6197                  if (!parseUnknownField(input, unknownFields,
6198                                         extensionRegistry, tag)) {
6199                    done = true;
6200                  }
6201                  break;
6202                }
6203                case 10: {
6204                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder subBuilder = null;
6205                  if (((bitField0_ & 0x00000001) == 0x00000001)) {
6206                    subBuilder = reqInfo_.toBuilder();
6207                  }
6208                  reqInfo_ = input.readMessage(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.PARSER, extensionRegistry);
6209                  if (subBuilder != null) {
6210                    subBuilder.mergeFrom(reqInfo_);
6211                    reqInfo_ = subBuilder.buildPartial();
6212                  }
6213                  bitField0_ |= 0x00000001;
6214                  break;
6215                }
6216                case 16: {
6217                  bitField0_ |= 0x00000002;
6218                  startTxId_ = input.readUInt64();
6219                  break;
6220                }
6221                case 24: {
6222                  bitField0_ |= 0x00000004;
6223                  endTxId_ = input.readUInt64();
6224                  break;
6225                }
6226              }
6227            }
6228          } catch (com.google.protobuf.InvalidProtocolBufferException e) {
6229            throw e.setUnfinishedMessage(this);
6230          } catch (java.io.IOException e) {
6231            throw new com.google.protobuf.InvalidProtocolBufferException(
6232                e.getMessage()).setUnfinishedMessage(this);
6233          } finally {
6234            this.unknownFields = unknownFields.build();
6235            makeExtensionsImmutable();
6236          }
6237        }
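            // Note (hand-written, not protoc output): the case labels above are
            // protobuf wire tags, tag = (fieldNumber << 3) | wireType:
            //   10 = (1 << 3) | 2  -> reqInfo,   length-delimited message
            //   16 = (2 << 3) | 0  -> startTxId, varint
            //   24 = (3 << 3) | 0  -> endTxId,   varint
            // readTag() returns 0 at end of input, which is never a valid tag.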
6238        public static final com.google.protobuf.Descriptors.Descriptor
6239            getDescriptor() {
6240          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_FinalizeLogSegmentRequestProto_descriptor;
6241        }
6242    
6243        protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
6244            internalGetFieldAccessorTable() {
6245          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_FinalizeLogSegmentRequestProto_fieldAccessorTable
6246              .ensureFieldAccessorsInitialized(
6247                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto.Builder.class);
6248        }
6249    
6250        public static com.google.protobuf.Parser<FinalizeLogSegmentRequestProto> PARSER =
6251            new com.google.protobuf.AbstractParser<FinalizeLogSegmentRequestProto>() {
6252          public FinalizeLogSegmentRequestProto parsePartialFrom(
6253              com.google.protobuf.CodedInputStream input,
6254              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
6255              throws com.google.protobuf.InvalidProtocolBufferException {
6256            return new FinalizeLogSegmentRequestProto(input, extensionRegistry);
6257          }
6258        };
6259    
6260        @java.lang.Override
6261        public com.google.protobuf.Parser<FinalizeLogSegmentRequestProto> getParserForType() {
6262          return PARSER;
6263        }
6264    
6265        private int bitField0_;
6266        // required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1;
6267        public static final int REQINFO_FIELD_NUMBER = 1;
6268        private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto reqInfo_;
6269        /**
6270         * <code>required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1;</code>
6271         */
6272        public boolean hasReqInfo() {
6273          return ((bitField0_ & 0x00000001) == 0x00000001);
6274        }
6275        /**
6276         * <code>required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1;</code>
6277         */
6278        public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo() {
6279          return reqInfo_;
6280        }
6281        /**
6282         * <code>required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1;</code>
6283         */
6284        public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder() {
6285          return reqInfo_;
6286        }
6287    
6288        // required uint64 startTxId = 2;
6289        public static final int STARTTXID_FIELD_NUMBER = 2;
6290        private long startTxId_;
6291        /**
6292         * <code>required uint64 startTxId = 2;</code>
6293         */
6294        public boolean hasStartTxId() {
6295          return ((bitField0_ & 0x00000002) == 0x00000002);
6296        }
6297        /**
6298         * <code>required uint64 startTxId = 2;</code>
6299         */
6300        public long getStartTxId() {
6301          return startTxId_;
6302        }
6303    
6304        // required uint64 endTxId = 3;
6305        public static final int ENDTXID_FIELD_NUMBER = 3;
6306        private long endTxId_;
6307        /**
6308         * <code>required uint64 endTxId = 3;</code>
6309         */
6310        public boolean hasEndTxId() {
6311          return ((bitField0_ & 0x00000004) == 0x00000004);
6312        }
6313        /**
6314         * <code>required uint64 endTxId = 3;</code>
6315         */
6316        public long getEndTxId() {
6317          return endTxId_;
6318        }
6319    
6320        private void initFields() {
6321          reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
6322          startTxId_ = 0L;
6323          endTxId_ = 0L;
6324        }
6325        private byte memoizedIsInitialized = -1;
6326        public final boolean isInitialized() {
6327          byte isInitialized = memoizedIsInitialized;
6328          if (isInitialized != -1) return isInitialized == 1;
6329    
6330          if (!hasReqInfo()) {
6331            memoizedIsInitialized = 0;
6332            return false;
6333          }
6334          if (!hasStartTxId()) {
6335            memoizedIsInitialized = 0;
6336            return false;
6337          }
6338          if (!hasEndTxId()) {
6339            memoizedIsInitialized = 0;
6340            return false;
6341          }
6342          if (!getReqInfo().isInitialized()) {
6343            memoizedIsInitialized = 0;
6344            return false;
6345          }
6346          memoizedIsInitialized = 1;
6347          return true;
6348        }
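
            // Note (hand-written): memoizedIsInitialized is a three-state cache
            // (-1 unknown, 0 false, 1 true), so the required-field walk above
            // runs at most once per instance.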
6349    
6350        public void writeTo(com.google.protobuf.CodedOutputStream output)
6351                            throws java.io.IOException {
6352          getSerializedSize();
6353          if (((bitField0_ & 0x00000001) == 0x00000001)) {
6354            output.writeMessage(1, reqInfo_);
6355          }
6356          if (((bitField0_ & 0x00000002) == 0x00000002)) {
6357            output.writeUInt64(2, startTxId_);
6358          }
6359          if (((bitField0_ & 0x00000004) == 0x00000004)) {
6360            output.writeUInt64(3, endTxId_);
6361          }
6362          getUnknownFields().writeTo(output);
6363        }
6364    
6365        private int memoizedSerializedSize = -1;
6366        public int getSerializedSize() {
6367          int size = memoizedSerializedSize;
6368          if (size != -1) return size;
6369    
6370          size = 0;
6371          if (((bitField0_ & 0x00000001) == 0x00000001)) {
6372            size += com.google.protobuf.CodedOutputStream
6373              .computeMessageSize(1, reqInfo_);
6374          }
6375          if (((bitField0_ & 0x00000002) == 0x00000002)) {
6376            size += com.google.protobuf.CodedOutputStream
6377              .computeUInt64Size(2, startTxId_);
6378          }
6379          if (((bitField0_ & 0x00000004) == 0x00000004)) {
6380            size += com.google.protobuf.CodedOutputStream
6381              .computeUInt64Size(3, endTxId_);
6382          }
6383          size += getUnknownFields().getSerializedSize();
6384          memoizedSerializedSize = size;
6385          return size;
6386        }
6387    
6388        private static final long serialVersionUID = 0L;
6389        @java.lang.Override
6390        protected java.lang.Object writeReplace()
6391            throws java.io.ObjectStreamException {
6392          return super.writeReplace();
6393        }
6394    
6395        @java.lang.Override
6396        public boolean equals(final java.lang.Object obj) {
6397          if (obj == this) {
6398            return true;
6399          }
6400          if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto)) {
6401            return super.equals(obj);
6402          }
6403          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto) obj;
6404    
6405          boolean result = true;
6406          result = result && (hasReqInfo() == other.hasReqInfo());
6407          if (hasReqInfo()) {
6408            result = result && getReqInfo()
6409                .equals(other.getReqInfo());
6410          }
6411          result = result && (hasStartTxId() == other.hasStartTxId());
6412          if (hasStartTxId()) {
6413            result = result && (getStartTxId()
6414                == other.getStartTxId());
6415          }
6416          result = result && (hasEndTxId() == other.hasEndTxId());
6417          if (hasEndTxId()) {
6418            result = result && (getEndTxId()
6419                == other.getEndTxId());
6420          }
6421          result = result &&
6422              getUnknownFields().equals(other.getUnknownFields());
6423          return result;
6424        }
6425    
6426        private int memoizedHashCode = 0;
6427        @java.lang.Override
6428        public int hashCode() {
6429          if (memoizedHashCode != 0) {
6430            return memoizedHashCode;
6431          }
6432          int hash = 41;
6433          hash = (19 * hash) + getDescriptorForType().hashCode();
6434          if (hasReqInfo()) {
6435            hash = (37 * hash) + REQINFO_FIELD_NUMBER;
6436            hash = (53 * hash) + getReqInfo().hashCode();
6437          }
6438          if (hasStartTxId()) {
6439            hash = (37 * hash) + STARTTXID_FIELD_NUMBER;
6440            hash = (53 * hash) + hashLong(getStartTxId());
6441          }
6442          if (hasEndTxId()) {
6443            hash = (37 * hash) + ENDTXID_FIELD_NUMBER;
6444            hash = (53 * hash) + hashLong(getEndTxId());
6445          }
6446          hash = (29 * hash) + getUnknownFields().hashCode();
6447          memoizedHashCode = hash;
6448          return hash;
6449        }
6450    
6451        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto parseFrom(
6452            com.google.protobuf.ByteString data)
6453            throws com.google.protobuf.InvalidProtocolBufferException {
6454          return PARSER.parseFrom(data);
6455        }
6456        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto parseFrom(
6457            com.google.protobuf.ByteString data,
6458            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
6459            throws com.google.protobuf.InvalidProtocolBufferException {
6460          return PARSER.parseFrom(data, extensionRegistry);
6461        }
6462        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto parseFrom(byte[] data)
6463            throws com.google.protobuf.InvalidProtocolBufferException {
6464          return PARSER.parseFrom(data);
6465        }
6466        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto parseFrom(
6467            byte[] data,
6468            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
6469            throws com.google.protobuf.InvalidProtocolBufferException {
6470          return PARSER.parseFrom(data, extensionRegistry);
6471        }
6472        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto parseFrom(java.io.InputStream input)
6473            throws java.io.IOException {
6474          return PARSER.parseFrom(input);
6475        }
6476        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto parseFrom(
6477            java.io.InputStream input,
6478            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
6479            throws java.io.IOException {
6480          return PARSER.parseFrom(input, extensionRegistry);
6481        }
6482        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto parseDelimitedFrom(java.io.InputStream input)
6483            throws java.io.IOException {
6484          return PARSER.parseDelimitedFrom(input);
6485        }
6486        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto parseDelimitedFrom(
6487            java.io.InputStream input,
6488            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
6489            throws java.io.IOException {
6490          return PARSER.parseDelimitedFrom(input, extensionRegistry);
6491        }
6492        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto parseFrom(
6493            com.google.protobuf.CodedInputStream input)
6494            throws java.io.IOException {
6495          return PARSER.parseFrom(input);
6496        }
6497        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto parseFrom(
6498            com.google.protobuf.CodedInputStream input,
6499            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
6500            throws java.io.IOException {
6501          return PARSER.parseFrom(input, extensionRegistry);
6502        }
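
            // Hand-written round-trip sketch (not protoc output): roundTrip() is
            // a hypothetical helper pairing the parseFrom() overloads above with
            // the serialization side inherited from GeneratedMessage.
            private static FinalizeLogSegmentRequestProto roundTrip(
                FinalizeLogSegmentRequestProto msg)
                throws com.google.protobuf.InvalidProtocolBufferException {
              byte[] wire = msg.toByteArray();  // serialize via writeTo()
              return parseFrom(wire);           // delegates to PARSER.parseFrom(byte[])
            }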
6503    
6504        public static Builder newBuilder() { return Builder.create(); }
6505        public Builder newBuilderForType() { return newBuilder(); }
6506        public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto prototype) {
6507          return newBuilder().mergeFrom(prototype);
6508        }
6509        public Builder toBuilder() { return newBuilder(this); }
6510    
6511        @java.lang.Override
6512        protected Builder newBuilderForType(
6513            com.google.protobuf.GeneratedMessage.BuilderParent parent) {
6514          Builder builder = new Builder(parent);
6515          return builder;
6516        }
6517        /**
6518         * Protobuf type {@code hadoop.hdfs.qjournal.FinalizeLogSegmentRequestProto}
6519         *
6520         * <pre>
6521         **
6522         * finalizeLogSegment()
6523         * </pre>
6524         */
6525        public static final class Builder extends
6526            com.google.protobuf.GeneratedMessage.Builder<Builder>
6527           implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProtoOrBuilder {
6528          public static final com.google.protobuf.Descriptors.Descriptor
6529              getDescriptor() {
6530            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_FinalizeLogSegmentRequestProto_descriptor;
6531          }
6532    
6533          protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
6534              internalGetFieldAccessorTable() {
6535            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_FinalizeLogSegmentRequestProto_fieldAccessorTable
6536                .ensureFieldAccessorsInitialized(
6537                    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto.Builder.class);
6538          }
6539    
6540          // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto.newBuilder()
6541          private Builder() {
6542            maybeForceBuilderInitialization();
6543          }
6544    
6545          private Builder(
6546              com.google.protobuf.GeneratedMessage.BuilderParent parent) {
6547            super(parent);
6548            maybeForceBuilderInitialization();
6549          }
6550          private void maybeForceBuilderInitialization() {
6551            if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
6552              getReqInfoFieldBuilder();
6553            }
6554          }
6555          private static Builder create() {
6556            return new Builder();
6557          }
6558    
6559          public Builder clear() {
6560            super.clear();
6561            if (reqInfoBuilder_ == null) {
6562              reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
6563            } else {
6564              reqInfoBuilder_.clear();
6565            }
6566            bitField0_ = (bitField0_ & ~0x00000001);
6567            startTxId_ = 0L;
6568            bitField0_ = (bitField0_ & ~0x00000002);
6569            endTxId_ = 0L;
6570            bitField0_ = (bitField0_ & ~0x00000004);
6571            return this;
6572          }
6573    
6574          public Builder clone() {
6575            return create().mergeFrom(buildPartial());
6576          }
6577    
6578          public com.google.protobuf.Descriptors.Descriptor
6579              getDescriptorForType() {
6580            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_FinalizeLogSegmentRequestProto_descriptor;
6581          }
6582    
6583          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto getDefaultInstanceForType() {
6584            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto.getDefaultInstance();
6585          }
6586    
6587          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto build() {
6588            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto result = buildPartial();
6589            if (!result.isInitialized()) {
6590              throw newUninitializedMessageException(result);
6591            }
6592            return result;
6593          }
6594    
6595          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto buildPartial() {
6596            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto(this);
6597            int from_bitField0_ = bitField0_;
6598            int to_bitField0_ = 0;
6599            if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
6600              to_bitField0_ |= 0x00000001;
6601            }
6602            if (reqInfoBuilder_ == null) {
6603              result.reqInfo_ = reqInfo_;
6604            } else {
6605              result.reqInfo_ = reqInfoBuilder_.build();
6606            }
6607            if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
6608              to_bitField0_ |= 0x00000002;
6609            }
6610            result.startTxId_ = startTxId_;
6611            if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
6612              to_bitField0_ |= 0x00000004;
6613            }
6614            result.endTxId_ = endTxId_;
6615            result.bitField0_ = to_bitField0_;
6616            onBuilt();
6617            return result;
6618          }
6619    
6620          public Builder mergeFrom(com.google.protobuf.Message other) {
6621            if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto) {
6622              return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto)other);
6623            } else {
6624              super.mergeFrom(other);
6625              return this;
6626            }
6627          }
6628    
6629          public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto other) {
6630            if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto.getDefaultInstance()) return this;
6631            if (other.hasReqInfo()) {
6632              mergeReqInfo(other.getReqInfo());
6633            }
6634            if (other.hasStartTxId()) {
6635              setStartTxId(other.getStartTxId());
6636            }
6637            if (other.hasEndTxId()) {
6638              setEndTxId(other.getEndTxId());
6639            }
6640            this.mergeUnknownFields(other.getUnknownFields());
6641            return this;
6642          }
6643    
6644          public final boolean isInitialized() {
6645            if (!hasReqInfo()) {
6647              return false;
6648            }
6649            if (!hasStartTxId()) {
6651              return false;
6652            }
6653            if (!hasEndTxId()) {
6655              return false;
6656            }
6657            if (!getReqInfo().isInitialized()) {
6659              return false;
6660            }
6661            return true;
6662          }
6663    
6664          public Builder mergeFrom(
6665              com.google.protobuf.CodedInputStream input,
6666              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
6667              throws java.io.IOException {
6668            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto parsedMessage = null;
6669            try {
6670              parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
6671            } catch (com.google.protobuf.InvalidProtocolBufferException e) {
6672              parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto) e.getUnfinishedMessage();
6673              throw e;
6674            } finally {
6675              if (parsedMessage != null) {
6676                mergeFrom(parsedMessage);
6677              }
6678            }
6679            return this;
6680          }
6681          private int bitField0_;
6682    
6683          // required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1;
6684          private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
6685          private com.google.protobuf.SingleFieldBuilder<
6686              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder> reqInfoBuilder_;
6687          /**
6688           * <code>required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1;</code>
6689           */
6690          public boolean hasReqInfo() {
6691            return ((bitField0_ & 0x00000001) == 0x00000001);
6692          }
6693          /**
6694           * <code>required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1;</code>
6695           */
6696          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo() {
6697            if (reqInfoBuilder_ == null) {
6698              return reqInfo_;
6699            } else {
6700              return reqInfoBuilder_.getMessage();
6701            }
6702          }
6703          /**
6704           * <code>required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1;</code>
6705           */
6706          public Builder setReqInfo(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto value) {
6707            if (reqInfoBuilder_ == null) {
6708              if (value == null) {
6709                throw new NullPointerException();
6710              }
6711              reqInfo_ = value;
6712              onChanged();
6713            } else {
6714              reqInfoBuilder_.setMessage(value);
6715            }
6716            bitField0_ |= 0x00000001;
6717            return this;
6718          }
6719          /**
6720           * <code>required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1;</code>
6721           */
6722          public Builder setReqInfo(
6723              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder builderForValue) {
6724            if (reqInfoBuilder_ == null) {
6725              reqInfo_ = builderForValue.build();
6726              onChanged();
6727            } else {
6728              reqInfoBuilder_.setMessage(builderForValue.build());
6729            }
6730            bitField0_ |= 0x00000001;
6731            return this;
6732          }
6733          /**
6734           * <code>required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1;</code>
6735           */
6736          public Builder mergeReqInfo(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto value) {
6737            if (reqInfoBuilder_ == null) {
6738              if (((bitField0_ & 0x00000001) == 0x00000001) &&
6739                  reqInfo_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance()) {
6740                reqInfo_ =
6741                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.newBuilder(reqInfo_).mergeFrom(value).buildPartial();
6742              } else {
6743                reqInfo_ = value;
6744              }
6745              onChanged();
6746            } else {
6747              reqInfoBuilder_.mergeFrom(value);
6748            }
6749            bitField0_ |= 0x00000001;
6750            return this;
6751          }
6752          /**
6753           * <code>required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1;</code>
6754           */
6755          public Builder clearReqInfo() {
6756            if (reqInfoBuilder_ == null) {
6757              reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
6758              onChanged();
6759            } else {
6760              reqInfoBuilder_.clear();
6761            }
6762            bitField0_ = (bitField0_ & ~0x00000001);
6763            return this;
6764          }
6765          /**
6766           * <code>required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1;</code>
6767           */
6768          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder getReqInfoBuilder() {
6769            bitField0_ |= 0x00000001;
6770            onChanged();
6771            return getReqInfoFieldBuilder().getBuilder();
6772          }
6773          /**
6774           * <code>required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1;</code>
6775           */
6776          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder() {
6777            if (reqInfoBuilder_ != null) {
6778              return reqInfoBuilder_.getMessageOrBuilder();
6779            } else {
6780              return reqInfo_;
6781            }
6782          }
6783          /**
6784           * <code>required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1;</code>
6785           */
6786          private com.google.protobuf.SingleFieldBuilder<
6787              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder> 
6788              getReqInfoFieldBuilder() {
6789            if (reqInfoBuilder_ == null) {
6790              reqInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder<
6791                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder>(
6792                      reqInfo_,
6793                      getParentForChildren(),
6794                      isClean());
6795              reqInfo_ = null;
6796            }
6797            return reqInfoBuilder_;
6798          }
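
              // Note (hand-written): reqInfo_ and reqInfoBuilder_ are mutually
              // exclusive storage for the same field: the plain message reference
              // is used until getReqInfoBuilder() forces the SingleFieldBuilder
              // into existence, after which the builder owns the value and
              // reqInfo_ is nulled (see getReqInfoFieldBuilder() above).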
6799    
6800          // required uint64 startTxId = 2;
6801          private long startTxId_ ;
6802          /**
6803           * <code>required uint64 startTxId = 2;</code>
6804           */
6805          public boolean hasStartTxId() {
6806            return ((bitField0_ & 0x00000002) == 0x00000002);
6807          }
6808          /**
6809           * <code>required uint64 startTxId = 2;</code>
6810           */
6811          public long getStartTxId() {
6812            return startTxId_;
6813          }
6814          /**
6815           * <code>required uint64 startTxId = 2;</code>
6816           */
6817          public Builder setStartTxId(long value) {
6818            bitField0_ |= 0x00000002;
6819            startTxId_ = value;
6820            onChanged();
6821            return this;
6822          }
6823          /**
6824           * <code>required uint64 startTxId = 2;</code>
6825           */
6826          public Builder clearStartTxId() {
6827            bitField0_ = (bitField0_ & ~0x00000002);
6828            startTxId_ = 0L;
6829            onChanged();
6830            return this;
6831          }
6832    
6833          // required uint64 endTxId = 3;
6834          private long endTxId_ ;
6835          /**
6836           * <code>required uint64 endTxId = 3;</code>
6837           */
6838          public boolean hasEndTxId() {
6839            return ((bitField0_ & 0x00000004) == 0x00000004);
6840          }
6841          /**
6842           * <code>required uint64 endTxId = 3;</code>
6843           */
6844          public long getEndTxId() {
6845            return endTxId_;
6846          }
6847          /**
6848           * <code>required uint64 endTxId = 3;</code>
6849           */
6850          public Builder setEndTxId(long value) {
6851            bitField0_ |= 0x00000004;
6852            endTxId_ = value;
6853            onChanged();
6854            return this;
6855          }
6856          /**
6857           * <code>required uint64 endTxId = 3;</code>
6858           */
6859          public Builder clearEndTxId() {
6860            bitField0_ = (bitField0_ & ~0x00000004);
6861            endTxId_ = 0L;
6862            onChanged();
6863            return this;
6864          }
6865    
6866          // @@protoc_insertion_point(builder_scope:hadoop.hdfs.qjournal.FinalizeLogSegmentRequestProto)
6867        }
6868    
6869        static {
6870          defaultInstance = new FinalizeLogSegmentRequestProto(true);
6871          defaultInstance.initFields();
6872        }
6873    
6874        // @@protoc_insertion_point(class_scope:hadoop.hdfs.qjournal.FinalizeLogSegmentRequestProto)
6875      }
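
          // Hand-written usage sketch (not protoc output): drives the generated
          // builder defined above. exampleFinalizeRequest() is hypothetical and
          // the txid values are illustrative; build() throws if any of the three
          // required fields is unset.
          private static FinalizeLogSegmentRequestProto exampleFinalizeRequest(
              RequestInfoProto reqInfo) {
            return FinalizeLogSegmentRequestProto.newBuilder()
                .setReqInfo(reqInfo)  // required field 1
                .setStartTxId(1L)     // required field 2
                .setEndTxId(100L)     // required field 3
                .build();
          }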
6876    
6877      public interface FinalizeLogSegmentResponseProtoOrBuilder
6878          extends com.google.protobuf.MessageOrBuilder {
6879      }
6880      /**
6881       * Protobuf type {@code hadoop.hdfs.qjournal.FinalizeLogSegmentResponseProto}
6882       */
6883      public static final class FinalizeLogSegmentResponseProto extends
6884          com.google.protobuf.GeneratedMessage
6885          implements FinalizeLogSegmentResponseProtoOrBuilder {
6886        // Use FinalizeLogSegmentResponseProto.newBuilder() to construct.
6887        private FinalizeLogSegmentResponseProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
6888          super(builder);
6889          this.unknownFields = builder.getUnknownFields();
6890        }
6891        private FinalizeLogSegmentResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
6892    
6893        private static final FinalizeLogSegmentResponseProto defaultInstance;
6894        public static FinalizeLogSegmentResponseProto getDefaultInstance() {
6895          return defaultInstance;
6896        }
6897    
6898        public FinalizeLogSegmentResponseProto getDefaultInstanceForType() {
6899          return defaultInstance;
6900        }
6901    
6902        private final com.google.protobuf.UnknownFieldSet unknownFields;
6903        @java.lang.Override
6904        public final com.google.protobuf.UnknownFieldSet
6905            getUnknownFields() {
6906          return this.unknownFields;
6907        }
6908        private FinalizeLogSegmentResponseProto(
6909            com.google.protobuf.CodedInputStream input,
6910            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
6911            throws com.google.protobuf.InvalidProtocolBufferException {
6912          initFields();
6913          com.google.protobuf.UnknownFieldSet.Builder unknownFields =
6914              com.google.protobuf.UnknownFieldSet.newBuilder();
6915          try {
6916            boolean done = false;
6917            while (!done) {
6918              int tag = input.readTag();
6919              switch (tag) {
6920                case 0:
6921                  done = true;
6922                  break;
6923                default: {
6924                  if (!parseUnknownField(input, unknownFields,
6925                                         extensionRegistry, tag)) {
6926                    done = true;
6927                  }
6928                  break;
6929                }
6930              }
6931            }
6932          } catch (com.google.protobuf.InvalidProtocolBufferException e) {
6933            throw e.setUnfinishedMessage(this);
6934          } catch (java.io.IOException e) {
6935            throw new com.google.protobuf.InvalidProtocolBufferException(
6936                e.getMessage()).setUnfinishedMessage(this);
6937          } finally {
6938            this.unknownFields = unknownFields.build();
6939            makeExtensionsImmutable();
6940          }
6941        }
6942        public static final com.google.protobuf.Descriptors.Descriptor
6943            getDescriptor() {
6944          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_FinalizeLogSegmentResponseProto_descriptor;
6945        }
6946    
6947        protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
6948            internalGetFieldAccessorTable() {
6949          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_FinalizeLogSegmentResponseProto_fieldAccessorTable
6950              .ensureFieldAccessorsInitialized(
6951                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto.Builder.class);
6952        }
6953    
6954        public static com.google.protobuf.Parser<FinalizeLogSegmentResponseProto> PARSER =
6955            new com.google.protobuf.AbstractParser<FinalizeLogSegmentResponseProto>() {
6956          public FinalizeLogSegmentResponseProto parsePartialFrom(
6957              com.google.protobuf.CodedInputStream input,
6958              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
6959              throws com.google.protobuf.InvalidProtocolBufferException {
6960            return new FinalizeLogSegmentResponseProto(input, extensionRegistry);
6961          }
6962        };
6963    
6964        @java.lang.Override
6965        public com.google.protobuf.Parser<FinalizeLogSegmentResponseProto> getParserForType() {
6966          return PARSER;
6967        }
6968    
6969        private void initFields() {
6970        }
6971        private byte memoizedIsInitialized = -1;
6972        public final boolean isInitialized() {
6973          byte isInitialized = memoizedIsInitialized;
6974          if (isInitialized != -1) return isInitialized == 1;
6975    
6976          memoizedIsInitialized = 1;
6977          return true;
6978        }
6979    
6980        public void writeTo(com.google.protobuf.CodedOutputStream output)
6981                            throws java.io.IOException {
6982          getSerializedSize();
6983          getUnknownFields().writeTo(output);
6984        }
6985    
6986        private int memoizedSerializedSize = -1;
6987        public int getSerializedSize() {
6988          int size = memoizedSerializedSize;
6989          if (size != -1) return size;
6990    
6991          size = 0;
6992          size += getUnknownFields().getSerializedSize();
6993          memoizedSerializedSize = size;
6994          return size;
6995        }
6996    
6997        private static final long serialVersionUID = 0L;
6998        @java.lang.Override
6999        protected java.lang.Object writeReplace()
7000            throws java.io.ObjectStreamException {
7001          return super.writeReplace();
7002        }
7003    
7004        @java.lang.Override
7005        public boolean equals(final java.lang.Object obj) {
7006          if (obj == this) {
7007            return true;
7008          }
7009          if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto)) {
7010            return super.equals(obj);
7011          }
7012          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto) obj;
7013    
7014          boolean result = true;
7015          result = result &&
7016              getUnknownFields().equals(other.getUnknownFields());
7017          return result;
7018        }
7019    
7020        private int memoizedHashCode = 0;
7021        @java.lang.Override
7022        public int hashCode() {
7023          if (memoizedHashCode != 0) {
7024            return memoizedHashCode;
7025          }
7026          int hash = 41;
7027          hash = (19 * hash) + getDescriptorForType().hashCode();
7028          hash = (29 * hash) + getUnknownFields().hashCode();
7029          memoizedHashCode = hash;
7030          return hash;
7031        }
7032    
7033        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto parseFrom(
7034            com.google.protobuf.ByteString data)
7035            throws com.google.protobuf.InvalidProtocolBufferException {
7036          return PARSER.parseFrom(data);
7037        }
7038        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto parseFrom(
7039            com.google.protobuf.ByteString data,
7040            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
7041            throws com.google.protobuf.InvalidProtocolBufferException {
7042          return PARSER.parseFrom(data, extensionRegistry);
7043        }
7044        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto parseFrom(byte[] data)
7045            throws com.google.protobuf.InvalidProtocolBufferException {
7046          return PARSER.parseFrom(data);
7047        }
7048        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto parseFrom(
7049            byte[] data,
7050            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
7051            throws com.google.protobuf.InvalidProtocolBufferException {
7052          return PARSER.parseFrom(data, extensionRegistry);
7053        }
7054        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto parseFrom(java.io.InputStream input)
7055            throws java.io.IOException {
7056          return PARSER.parseFrom(input);
7057        }
7058        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto parseFrom(
7059            java.io.InputStream input,
7060            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
7061            throws java.io.IOException {
7062          return PARSER.parseFrom(input, extensionRegistry);
7063        }
7064        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto parseDelimitedFrom(java.io.InputStream input)
7065            throws java.io.IOException {
7066          return PARSER.parseDelimitedFrom(input);
7067        }
7068        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto parseDelimitedFrom(
7069            java.io.InputStream input,
7070            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
7071            throws java.io.IOException {
7072          return PARSER.parseDelimitedFrom(input, extensionRegistry);
7073        }
7074        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto parseFrom(
7075            com.google.protobuf.CodedInputStream input)
7076            throws java.io.IOException {
7077          return PARSER.parseFrom(input);
7078        }
7079        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto parseFrom(
7080            com.google.protobuf.CodedInputStream input,
7081            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
7082            throws java.io.IOException {
7083          return PARSER.parseFrom(input, extensionRegistry);
7084        }
7085    
7086        public static Builder newBuilder() { return Builder.create(); }
7087        public Builder newBuilderForType() { return newBuilder(); }
7088        public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto prototype) {
7089          return newBuilder().mergeFrom(prototype);
7090        }
7091        public Builder toBuilder() { return newBuilder(this); }
7092    
7093        @java.lang.Override
7094        protected Builder newBuilderForType(
7095            com.google.protobuf.GeneratedMessage.BuilderParent parent) {
7096          Builder builder = new Builder(parent);
7097          return builder;
7098        }
7099        /**
7100         * Protobuf type {@code hadoop.hdfs.qjournal.FinalizeLogSegmentResponseProto}
7101         */
7102        public static final class Builder extends
7103            com.google.protobuf.GeneratedMessage.Builder<Builder>
7104           implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProtoOrBuilder {
7105          public static final com.google.protobuf.Descriptors.Descriptor
7106              getDescriptor() {
7107            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_FinalizeLogSegmentResponseProto_descriptor;
7108          }
7109    
7110          protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
7111              internalGetFieldAccessorTable() {
7112            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_FinalizeLogSegmentResponseProto_fieldAccessorTable
7113                .ensureFieldAccessorsInitialized(
7114                    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto.Builder.class);
7115          }
7116    
7117          // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto.newBuilder()
7118          private Builder() {
7119            maybeForceBuilderInitialization();
7120          }
7121    
7122          private Builder(
7123              com.google.protobuf.GeneratedMessage.BuilderParent parent) {
7124            super(parent);
7125            maybeForceBuilderInitialization();
7126          }
7127          private void maybeForceBuilderInitialization() {
7128            if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
7129            }
7130          }
7131          private static Builder create() {
7132            return new Builder();
7133          }
7134    
7135          public Builder clear() {
7136            super.clear();
7137            return this;
7138          }
7139    
7140          public Builder clone() {
7141            return create().mergeFrom(buildPartial());
7142          }
7143    
7144          public com.google.protobuf.Descriptors.Descriptor
7145              getDescriptorForType() {
7146            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_FinalizeLogSegmentResponseProto_descriptor;
7147          }
7148    
7149          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto getDefaultInstanceForType() {
7150            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto.getDefaultInstance();
7151          }
7152    
7153          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto build() {
7154            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto result = buildPartial();
7155            if (!result.isInitialized()) {
7156              throw newUninitializedMessageException(result);
7157            }
7158            return result;
7159          }
7160    
7161          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto buildPartial() {
7162            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto(this);
7163            onBuilt();
7164            return result;
7165          }
7166    
7167          public Builder mergeFrom(com.google.protobuf.Message other) {
7168            if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto) {
7169              return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto)other);
7170            } else {
7171              super.mergeFrom(other);
7172              return this;
7173            }
7174          }
7175    
7176          public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto other) {
7177            if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto.getDefaultInstance()) return this;
7178            this.mergeUnknownFields(other.getUnknownFields());
7179            return this;
7180          }
7181    
7182          public final boolean isInitialized() {
7183            return true;
7184          }
7185    
7186          public Builder mergeFrom(
7187              com.google.protobuf.CodedInputStream input,
7188              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
7189              throws java.io.IOException {
7190            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto parsedMessage = null;
7191            try {
7192              parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
7193            } catch (com.google.protobuf.InvalidProtocolBufferException e) {
7194              parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto) e.getUnfinishedMessage();
7195              throw e;
7196            } finally {
7197              if (parsedMessage != null) {
7198                mergeFrom(parsedMessage);
7199              }
7200            }
7201            return this;
7202          }
7203    
7204          // @@protoc_insertion_point(builder_scope:hadoop.hdfs.qjournal.FinalizeLogSegmentResponseProto)
7205        }
7206    
7207        static {
7208          defaultInstance = new FinalizeLogSegmentResponseProto(true);
7209          defaultInstance.initFields();
7210        }
7211    
7212        // @@protoc_insertion_point(class_scope:hadoop.hdfs.qjournal.FinalizeLogSegmentResponseProto)
7213      }
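
          // Hand-written note (not protoc output): this message has no fields and
          // acts as a typed void ack; the shared default instance is the usual
          // way to produce one, as in this hypothetical helper.
          private static FinalizeLogSegmentResponseProto emptyFinalizeAck() {
            return FinalizeLogSegmentResponseProto.getDefaultInstance();
          }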
7214    
7215      public interface PurgeLogsRequestProtoOrBuilder
7216          extends com.google.protobuf.MessageOrBuilder {
7217    
7218        // required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1;
7219        /**
7220         * <code>required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1;</code>
7221         */
7222        boolean hasReqInfo();
7223        /**
7224         * <code>required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1;</code>
7225         */
7226        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo();
7227        /**
7228         * <code>required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1;</code>
7229         */
7230        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder();
7231    
7232        // required uint64 minTxIdToKeep = 2;
7233        /**
7234         * <code>required uint64 minTxIdToKeep = 2;</code>
7235         */
7236        boolean hasMinTxIdToKeep();
7237        /**
7238         * <code>required uint64 minTxIdToKeep = 2;</code>
7239         */
7240        long getMinTxIdToKeep();
7241      }
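          // Hand-written reading sketch (not protoc output): mirrors the guard
          // pattern shown for FinalizeLogSegmentRequestProto. keepsTxId() is a
          // hypothetical helper; a txid is retained iff it is at or above
          // minTxIdToKeep.
          private static boolean keepsTxId(PurgeLogsRequestProtoOrBuilder req,
              long txId) {
            return req.hasMinTxIdToKeep() && txId >= req.getMinTxIdToKeep();
          }
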
7242      /**
7243       * Protobuf type {@code hadoop.hdfs.qjournal.PurgeLogsRequestProto}
7244       *
7245       * <pre>
7246       **
7247       * purgeLogs()
7248       * </pre>
7249       */
7250      public static final class PurgeLogsRequestProto extends
7251          com.google.protobuf.GeneratedMessage
7252          implements PurgeLogsRequestProtoOrBuilder {
7253        // Use PurgeLogsRequestProto.newBuilder() to construct.
7254        private PurgeLogsRequestProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
7255          super(builder);
7256          this.unknownFields = builder.getUnknownFields();
7257        }
7258        private PurgeLogsRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
7259    
7260        private static final PurgeLogsRequestProto defaultInstance;
7261        public static PurgeLogsRequestProto getDefaultInstance() {
7262          return defaultInstance;
7263        }
7264    
7265        public PurgeLogsRequestProto getDefaultInstanceForType() {
7266          return defaultInstance;
7267        }
7268    
7269        private final com.google.protobuf.UnknownFieldSet unknownFields;
7270        @java.lang.Override
7271        public final com.google.protobuf.UnknownFieldSet
7272            getUnknownFields() {
7273          return this.unknownFields;
7274        }
7275        private PurgeLogsRequestProto(
7276            com.google.protobuf.CodedInputStream input,
7277            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
7278            throws com.google.protobuf.InvalidProtocolBufferException {
7279          initFields();
7280          int mutable_bitField0_ = 0;
7281          com.google.protobuf.UnknownFieldSet.Builder unknownFields =
7282              com.google.protobuf.UnknownFieldSet.newBuilder();
7283          try {
7284            boolean done = false;
7285            while (!done) {
7286              int tag = input.readTag();
7287              switch (tag) {
7288                case 0:
7289                  done = true;
7290                  break;
7291                default: {
7292                  if (!parseUnknownField(input, unknownFields,
7293                                         extensionRegistry, tag)) {
7294                    done = true;
7295                  }
7296                  break;
7297                }
7298                case 10: {
7299                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder subBuilder = null;
7300                  if (((bitField0_ & 0x00000001) == 0x00000001)) {
7301                    subBuilder = reqInfo_.toBuilder();
7302                  }
7303                  reqInfo_ = input.readMessage(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.PARSER, extensionRegistry);
7304                  if (subBuilder != null) {
7305                    subBuilder.mergeFrom(reqInfo_);
7306                    reqInfo_ = subBuilder.buildPartial();
7307                  }
7308                  bitField0_ |= 0x00000001;
7309                  break;
7310                }
7311                case 16: {
7312                  bitField0_ |= 0x00000002;
7313                  minTxIdToKeep_ = input.readUInt64();
7314                  break;
7315                }
7316              }
7317            }
7318          } catch (com.google.protobuf.InvalidProtocolBufferException e) {
7319            throw e.setUnfinishedMessage(this);
7320          } catch (java.io.IOException e) {
7321            throw new com.google.protobuf.InvalidProtocolBufferException(
7322                e.getMessage()).setUnfinishedMessage(this);
7323          } finally {
7324            this.unknownFields = unknownFields.build();
7325            makeExtensionsImmutable();
7326          }
7327        }
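    // Editorial note: the case labels above are protobuf wire-format tags,
    // computed as (field_number << 3) | wire_type. Field 1 (reqInfo, a
    // length-delimited nested message, wire type 2) yields (1 << 3) | 2 = 10;
    // field 2 (minTxIdToKeep, a varint, wire type 0) yields (2 << 3) | 0 = 16.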
7328        public static final com.google.protobuf.Descriptors.Descriptor
7329            getDescriptor() {
7330          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_PurgeLogsRequestProto_descriptor;
7331        }
7332    
7333        protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
7334            internalGetFieldAccessorTable() {
7335          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_PurgeLogsRequestProto_fieldAccessorTable
7336              .ensureFieldAccessorsInitialized(
7337                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto.Builder.class);
7338        }
7339    
7340        public static com.google.protobuf.Parser<PurgeLogsRequestProto> PARSER =
7341            new com.google.protobuf.AbstractParser<PurgeLogsRequestProto>() {
7342          public PurgeLogsRequestProto parsePartialFrom(
7343              com.google.protobuf.CodedInputStream input,
7344              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
7345              throws com.google.protobuf.InvalidProtocolBufferException {
7346            return new PurgeLogsRequestProto(input, extensionRegistry);
7347          }
7348        };
7349    
7350        @java.lang.Override
7351        public com.google.protobuf.Parser<PurgeLogsRequestProto> getParserForType() {
7352          return PARSER;
7353        }
7354    
7355        private int bitField0_;
7356        // required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1;
7357        public static final int REQINFO_FIELD_NUMBER = 1;
7358        private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto reqInfo_;
7359        /**
7360         * <code>required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1;</code>
7361         */
7362        public boolean hasReqInfo() {
7363          return ((bitField0_ & 0x00000001) == 0x00000001);
7364        }
7365        /**
7366         * <code>required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1;</code>
7367         */
7368        public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo() {
7369          return reqInfo_;
7370        }
7371        /**
7372         * <code>required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1;</code>
7373         */
7374        public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder() {
7375          return reqInfo_;
7376        }
7377    
7378        // required uint64 minTxIdToKeep = 2;
7379        public static final int MINTXIDTOKEEP_FIELD_NUMBER = 2;
7380        private long minTxIdToKeep_;
7381        /**
7382         * <code>required uint64 minTxIdToKeep = 2;</code>
7383         */
7384        public boolean hasMinTxIdToKeep() {
7385          return ((bitField0_ & 0x00000002) == 0x00000002);
7386        }
7387        /**
7388         * <code>required uint64 minTxIdToKeep = 2;</code>
7389         */
7390        public long getMinTxIdToKeep() {
7391          return minTxIdToKeep_;
7392        }
7393    
7394        private void initFields() {
7395          reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
7396          minTxIdToKeep_ = 0L;
7397        }
7398        private byte memoizedIsInitialized = -1;
7399        public final boolean isInitialized() {
7400          byte isInitialized = memoizedIsInitialized;
7401          if (isInitialized != -1) return isInitialized == 1;
7402    
7403          if (!hasReqInfo()) {
7404            memoizedIsInitialized = 0;
7405            return false;
7406          }
7407          if (!hasMinTxIdToKeep()) {
7408            memoizedIsInitialized = 0;
7409            return false;
7410          }
7411          if (!getReqInfo().isInitialized()) {
7412            memoizedIsInitialized = 0;
7413            return false;
7414          }
7415          memoizedIsInitialized = 1;
7416          return true;
7417        }
7418    
7419        public void writeTo(com.google.protobuf.CodedOutputStream output)
7420                            throws java.io.IOException {
7421          getSerializedSize();
7422          if (((bitField0_ & 0x00000001) == 0x00000001)) {
7423            output.writeMessage(1, reqInfo_);
7424          }
7425          if (((bitField0_ & 0x00000002) == 0x00000002)) {
7426            output.writeUInt64(2, minTxIdToKeep_);
7427          }
7428          getUnknownFields().writeTo(output);
7429        }
7430    
7431        private int memoizedSerializedSize = -1;
7432        public int getSerializedSize() {
7433          int size = memoizedSerializedSize;
7434          if (size != -1) return size;
7435    
7436          size = 0;
7437          if (((bitField0_ & 0x00000001) == 0x00000001)) {
7438            size += com.google.protobuf.CodedOutputStream
7439              .computeMessageSize(1, reqInfo_);
7440          }
7441          if (((bitField0_ & 0x00000002) == 0x00000002)) {
7442            size += com.google.protobuf.CodedOutputStream
7443              .computeUInt64Size(2, minTxIdToKeep_);
7444          }
7445          size += getUnknownFields().getSerializedSize();
7446          memoizedSerializedSize = size;
7447          return size;
7448        }
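    // Editorial note: serialized size follows the protobuf wire format; for
    // example, a hypothetical minTxIdToKeep of 1000 costs 3 bytes in total,
    // 1 byte for tag 16 plus 2 varint bytes for the 10-bit value 1000.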
7449    
7450        private static final long serialVersionUID = 0L;
7451        @java.lang.Override
7452        protected java.lang.Object writeReplace()
7453            throws java.io.ObjectStreamException {
7454          return super.writeReplace();
7455        }
7456    
7457        @java.lang.Override
7458        public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
7462          if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto)) {
7463            return super.equals(obj);
7464          }
7465          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto) obj;
7466    
7467          boolean result = true;
7468          result = result && (hasReqInfo() == other.hasReqInfo());
7469          if (hasReqInfo()) {
7470            result = result && getReqInfo()
7471                .equals(other.getReqInfo());
7472          }
7473          result = result && (hasMinTxIdToKeep() == other.hasMinTxIdToKeep());
7474          if (hasMinTxIdToKeep()) {
7475            result = result && (getMinTxIdToKeep()
7476                == other.getMinTxIdToKeep());
7477          }
7478          result = result &&
7479              getUnknownFields().equals(other.getUnknownFields());
7480          return result;
7481        }
7482    
7483        private int memoizedHashCode = 0;
7484        @java.lang.Override
7485        public int hashCode() {
7486          if (memoizedHashCode != 0) {
7487            return memoizedHashCode;
7488          }
7489          int hash = 41;
7490          hash = (19 * hash) + getDescriptorForType().hashCode();
7491          if (hasReqInfo()) {
7492            hash = (37 * hash) + REQINFO_FIELD_NUMBER;
7493            hash = (53 * hash) + getReqInfo().hashCode();
7494          }
7495          if (hasMinTxIdToKeep()) {
7496            hash = (37 * hash) + MINTXIDTOKEEP_FIELD_NUMBER;
7497            hash = (53 * hash) + hashLong(getMinTxIdToKeep());
7498          }
7499          hash = (29 * hash) + getUnknownFields().hashCode();
7500          memoizedHashCode = hash;
7501          return hash;
7502        }
7503    
7504        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto parseFrom(
7505            com.google.protobuf.ByteString data)
7506            throws com.google.protobuf.InvalidProtocolBufferException {
7507          return PARSER.parseFrom(data);
7508        }
7509        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto parseFrom(
7510            com.google.protobuf.ByteString data,
7511            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
7512            throws com.google.protobuf.InvalidProtocolBufferException {
7513          return PARSER.parseFrom(data, extensionRegistry);
7514        }
7515        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto parseFrom(byte[] data)
7516            throws com.google.protobuf.InvalidProtocolBufferException {
7517          return PARSER.parseFrom(data);
7518        }
7519        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto parseFrom(
7520            byte[] data,
7521            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
7522            throws com.google.protobuf.InvalidProtocolBufferException {
7523          return PARSER.parseFrom(data, extensionRegistry);
7524        }
7525        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto parseFrom(java.io.InputStream input)
7526            throws java.io.IOException {
7527          return PARSER.parseFrom(input);
7528        }
7529        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto parseFrom(
7530            java.io.InputStream input,
7531            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
7532            throws java.io.IOException {
7533          return PARSER.parseFrom(input, extensionRegistry);
7534        }
7535        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto parseDelimitedFrom(java.io.InputStream input)
7536            throws java.io.IOException {
7537          return PARSER.parseDelimitedFrom(input);
7538        }
7539        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto parseDelimitedFrom(
7540            java.io.InputStream input,
7541            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
7542            throws java.io.IOException {
7543          return PARSER.parseDelimitedFrom(input, extensionRegistry);
7544        }
7545        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto parseFrom(
7546            com.google.protobuf.CodedInputStream input)
7547            throws java.io.IOException {
7548          return PARSER.parseFrom(input);
7549        }
7550        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto parseFrom(
7551            com.google.protobuf.CodedInputStream input,
7552            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
7553            throws java.io.IOException {
7554          return PARSER.parseFrom(input, extensionRegistry);
7555        }
7556    
7557        public static Builder newBuilder() { return Builder.create(); }
7558        public Builder newBuilderForType() { return newBuilder(); }
7559        public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto prototype) {
7560          return newBuilder().mergeFrom(prototype);
7561        }
7562        public Builder toBuilder() { return newBuilder(this); }
7563    
7564        @java.lang.Override
7565        protected Builder newBuilderForType(
7566            com.google.protobuf.GeneratedMessage.BuilderParent parent) {
7567          Builder builder = new Builder(parent);
7568          return builder;
7569        }
7570        /**
7571         * Protobuf type {@code hadoop.hdfs.qjournal.PurgeLogsRequestProto}
7572         *
7573         * <pre>
7575         * purgeLogs()
7576         * </pre>
7577         */
7578        public static final class Builder extends
7579            com.google.protobuf.GeneratedMessage.Builder<Builder>
7580           implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProtoOrBuilder {
7581          public static final com.google.protobuf.Descriptors.Descriptor
7582              getDescriptor() {
7583            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_PurgeLogsRequestProto_descriptor;
7584          }
7585    
7586          protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
7587              internalGetFieldAccessorTable() {
7588            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_PurgeLogsRequestProto_fieldAccessorTable
7589                .ensureFieldAccessorsInitialized(
7590                    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto.Builder.class);
7591          }
7592    
7593          // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto.newBuilder()
7594          private Builder() {
7595            maybeForceBuilderInitialization();
7596          }
7597    
7598          private Builder(
7599              com.google.protobuf.GeneratedMessage.BuilderParent parent) {
7600            super(parent);
7601            maybeForceBuilderInitialization();
7602          }
7603          private void maybeForceBuilderInitialization() {
7604            if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
7605              getReqInfoFieldBuilder();
7606            }
7607          }
7608          private static Builder create() {
7609            return new Builder();
7610          }
7611    
7612          public Builder clear() {
7613            super.clear();
7614            if (reqInfoBuilder_ == null) {
7615              reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
7616            } else {
7617              reqInfoBuilder_.clear();
7618            }
7619            bitField0_ = (bitField0_ & ~0x00000001);
7620            minTxIdToKeep_ = 0L;
7621            bitField0_ = (bitField0_ & ~0x00000002);
7622            return this;
7623          }
7624    
7625          public Builder clone() {
7626            return create().mergeFrom(buildPartial());
7627          }
7628    
7629          public com.google.protobuf.Descriptors.Descriptor
7630              getDescriptorForType() {
7631            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_PurgeLogsRequestProto_descriptor;
7632          }
7633    
7634          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto getDefaultInstanceForType() {
7635            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto.getDefaultInstance();
7636          }
7637    
7638          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto build() {
7639            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto result = buildPartial();
7640            if (!result.isInitialized()) {
7641              throw newUninitializedMessageException(result);
7642            }
7643            return result;
7644          }
7645    
7646          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto buildPartial() {
7647            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto(this);
7648            int from_bitField0_ = bitField0_;
7649            int to_bitField0_ = 0;
7650            if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
7651              to_bitField0_ |= 0x00000001;
7652            }
7653            if (reqInfoBuilder_ == null) {
7654              result.reqInfo_ = reqInfo_;
7655            } else {
7656              result.reqInfo_ = reqInfoBuilder_.build();
7657            }
7658            if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
7659              to_bitField0_ |= 0x00000002;
7660            }
7661            result.minTxIdToKeep_ = minTxIdToKeep_;
7662            result.bitField0_ = to_bitField0_;
7663            onBuilt();
7664            return result;
7665          }
7666    
7667          public Builder mergeFrom(com.google.protobuf.Message other) {
7668            if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto) {
7669              return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto)other);
7670            } else {
7671              super.mergeFrom(other);
7672              return this;
7673            }
7674          }
7675    
7676          public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto other) {
7677            if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto.getDefaultInstance()) return this;
7678            if (other.hasReqInfo()) {
7679              mergeReqInfo(other.getReqInfo());
7680            }
7681            if (other.hasMinTxIdToKeep()) {
7682              setMinTxIdToKeep(other.getMinTxIdToKeep());
7683            }
7684            this.mergeUnknownFields(other.getUnknownFields());
7685            return this;
7686          }
7687    
      public final boolean isInitialized() {
        if (!hasReqInfo()) {
          // required field reqInfo is not set
          return false;
        }
        if (!hasMinTxIdToKeep()) {
          // required field minTxIdToKeep is not set
          return false;
        }
        if (!getReqInfo().isInitialized()) {
          // the nested reqInfo message is missing its own required fields
          return false;
        }
        return true;
      }
7703    
7704          public Builder mergeFrom(
7705              com.google.protobuf.CodedInputStream input,
7706              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
7707              throws java.io.IOException {
7708            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto parsedMessage = null;
7709            try {
7710              parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
7711            } catch (com.google.protobuf.InvalidProtocolBufferException e) {
7712              parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto) e.getUnfinishedMessage();
7713              throw e;
7714            } finally {
7715              if (parsedMessage != null) {
7716                mergeFrom(parsedMessage);
7717              }
7718            }
7719            return this;
7720          }
7721          private int bitField0_;
7722    
7723          // required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1;
7724          private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
7725          private com.google.protobuf.SingleFieldBuilder<
7726              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder> reqInfoBuilder_;
7727          /**
7728           * <code>required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1;</code>
7729           */
7730          public boolean hasReqInfo() {
7731            return ((bitField0_ & 0x00000001) == 0x00000001);
7732          }
7733          /**
7734           * <code>required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1;</code>
7735           */
7736          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo() {
7737            if (reqInfoBuilder_ == null) {
7738              return reqInfo_;
7739            } else {
7740              return reqInfoBuilder_.getMessage();
7741            }
7742          }
7743          /**
7744           * <code>required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1;</code>
7745           */
7746          public Builder setReqInfo(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto value) {
7747            if (reqInfoBuilder_ == null) {
7748              if (value == null) {
7749                throw new NullPointerException();
7750              }
7751              reqInfo_ = value;
7752              onChanged();
7753            } else {
7754              reqInfoBuilder_.setMessage(value);
7755            }
7756            bitField0_ |= 0x00000001;
7757            return this;
7758          }
7759          /**
7760           * <code>required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1;</code>
7761           */
7762          public Builder setReqInfo(
7763              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder builderForValue) {
7764            if (reqInfoBuilder_ == null) {
7765              reqInfo_ = builderForValue.build();
7766              onChanged();
7767            } else {
7768              reqInfoBuilder_.setMessage(builderForValue.build());
7769            }
7770            bitField0_ |= 0x00000001;
7771            return this;
7772          }
7773          /**
7774           * <code>required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1;</code>
7775           */
7776          public Builder mergeReqInfo(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto value) {
7777            if (reqInfoBuilder_ == null) {
7778              if (((bitField0_ & 0x00000001) == 0x00000001) &&
7779                  reqInfo_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance()) {
7780                reqInfo_ =
7781                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.newBuilder(reqInfo_).mergeFrom(value).buildPartial();
7782              } else {
7783                reqInfo_ = value;
7784              }
7785              onChanged();
7786            } else {
7787              reqInfoBuilder_.mergeFrom(value);
7788            }
7789            bitField0_ |= 0x00000001;
7790            return this;
7791          }
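      // Editorial note: following protobuf merge semantics for singular
      // message fields, mergeReqInfo() above combines the incoming
      // RequestInfoProto with any value already present field by field,
      // whereas setReqInfo() replaces the whole field.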
7792          /**
7793           * <code>required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1;</code>
7794           */
7795          public Builder clearReqInfo() {
7796            if (reqInfoBuilder_ == null) {
7797              reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
7798              onChanged();
7799            } else {
7800              reqInfoBuilder_.clear();
7801            }
7802            bitField0_ = (bitField0_ & ~0x00000001);
7803            return this;
7804          }
7805          /**
7806           * <code>required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1;</code>
7807           */
7808          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder getReqInfoBuilder() {
7809            bitField0_ |= 0x00000001;
7810            onChanged();
7811            return getReqInfoFieldBuilder().getBuilder();
7812          }
7813          /**
7814           * <code>required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1;</code>
7815           */
7816          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder() {
7817            if (reqInfoBuilder_ != null) {
7818              return reqInfoBuilder_.getMessageOrBuilder();
7819            } else {
7820              return reqInfo_;
7821            }
7822          }
7823          /**
7824           * <code>required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1;</code>
7825           */
7826          private com.google.protobuf.SingleFieldBuilder<
7827              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder> 
7828              getReqInfoFieldBuilder() {
7829            if (reqInfoBuilder_ == null) {
7830              reqInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder<
7831                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder>(
7832                      reqInfo_,
7833                      getParentForChildren(),
7834                      isClean());
7835              reqInfo_ = null;
7836            }
7837            return reqInfoBuilder_;
7838          }
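      // Editorial note: after the first call to getReqInfoFieldBuilder(), the
      // plain reqInfo_ field is set to null and the SingleFieldBuilder becomes
      // the single source of truth; the accessors above check reqInfoBuilder_
      // for null to decide which representation is active.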
7839    
7840          // required uint64 minTxIdToKeep = 2;
      private long minTxIdToKeep_;
7842          /**
7843           * <code>required uint64 minTxIdToKeep = 2;</code>
7844           */
7845          public boolean hasMinTxIdToKeep() {
7846            return ((bitField0_ & 0x00000002) == 0x00000002);
7847          }
7848          /**
7849           * <code>required uint64 minTxIdToKeep = 2;</code>
7850           */
7851          public long getMinTxIdToKeep() {
7852            return minTxIdToKeep_;
7853          }
7854          /**
7855           * <code>required uint64 minTxIdToKeep = 2;</code>
7856           */
7857          public Builder setMinTxIdToKeep(long value) {
7858            bitField0_ |= 0x00000002;
7859            minTxIdToKeep_ = value;
7860            onChanged();
7861            return this;
7862          }
7863          /**
7864           * <code>required uint64 minTxIdToKeep = 2;</code>
7865           */
7866          public Builder clearMinTxIdToKeep() {
7867            bitField0_ = (bitField0_ & ~0x00000002);
7868            minTxIdToKeep_ = 0L;
7869            onChanged();
7870            return this;
7871          }
7872    
7873          // @@protoc_insertion_point(builder_scope:hadoop.hdfs.qjournal.PurgeLogsRequestProto)
7874        }
7875    
7876        static {
7877          defaultInstance = new PurgeLogsRequestProto(true);
7878          defaultInstance.initFields();
7879        }
7880    
7881        // @@protoc_insertion_point(class_scope:hadoop.hdfs.qjournal.PurgeLogsRequestProto)
7882      }
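  // Usage sketch (editorial, not protoc output; values are hypothetical).
  // Assumes a RequestInfoProto built elsewhere with its own required fields
  // set, since build() throws if any required field is missing:
  //
  //   RequestInfoProto reqInfo = ...; // must itself be fully initialized
  //   PurgeLogsRequestProto req = PurgeLogsRequestProto.newBuilder()
  //       .setReqInfo(reqInfo)
  //       .setMinTxIdToKeep(1000L)    // hypothetical transaction id
  //       .build();
  //   byte[] wire = req.toByteArray();
  //   PurgeLogsRequestProto parsed = PurgeLogsRequestProto.parseFrom(wire);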
7883    
7884      public interface PurgeLogsResponseProtoOrBuilder
7885          extends com.google.protobuf.MessageOrBuilder {
7886      }
7887      /**
7888       * Protobuf type {@code hadoop.hdfs.qjournal.PurgeLogsResponseProto}
7889       */
7890      public static final class PurgeLogsResponseProto extends
7891          com.google.protobuf.GeneratedMessage
7892          implements PurgeLogsResponseProtoOrBuilder {
7893        // Use PurgeLogsResponseProto.newBuilder() to construct.
7894        private PurgeLogsResponseProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
7895          super(builder);
7896          this.unknownFields = builder.getUnknownFields();
7897        }
7898        private PurgeLogsResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
7899    
7900        private static final PurgeLogsResponseProto defaultInstance;
7901        public static PurgeLogsResponseProto getDefaultInstance() {
7902          return defaultInstance;
7903        }
7904    
7905        public PurgeLogsResponseProto getDefaultInstanceForType() {
7906          return defaultInstance;
7907        }
7908    
7909        private final com.google.protobuf.UnknownFieldSet unknownFields;
7910        @java.lang.Override
7911        public final com.google.protobuf.UnknownFieldSet
7912            getUnknownFields() {
7913          return this.unknownFields;
7914        }
7915        private PurgeLogsResponseProto(
7916            com.google.protobuf.CodedInputStream input,
7917            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
7918            throws com.google.protobuf.InvalidProtocolBufferException {
7919          initFields();
7920          com.google.protobuf.UnknownFieldSet.Builder unknownFields =
7921              com.google.protobuf.UnknownFieldSet.newBuilder();
7922          try {
7923            boolean done = false;
7924            while (!done) {
7925              int tag = input.readTag();
7926              switch (tag) {
7927                case 0:
7928                  done = true;
7929                  break;
7930                default: {
7931                  if (!parseUnknownField(input, unknownFields,
7932                                         extensionRegistry, tag)) {
7933                    done = true;
7934                  }
7935                  break;
7936                }
7937              }
7938            }
7939          } catch (com.google.protobuf.InvalidProtocolBufferException e) {
7940            throw e.setUnfinishedMessage(this);
7941          } catch (java.io.IOException e) {
7942            throw new com.google.protobuf.InvalidProtocolBufferException(
7943                e.getMessage()).setUnfinishedMessage(this);
7944          } finally {
7945            this.unknownFields = unknownFields.build();
7946            makeExtensionsImmutable();
7947          }
7948        }
7949        public static final com.google.protobuf.Descriptors.Descriptor
7950            getDescriptor() {
7951          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_PurgeLogsResponseProto_descriptor;
7952        }
7953    
7954        protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
7955            internalGetFieldAccessorTable() {
7956          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_PurgeLogsResponseProto_fieldAccessorTable
7957              .ensureFieldAccessorsInitialized(
7958                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto.Builder.class);
7959        }
7960    
7961        public static com.google.protobuf.Parser<PurgeLogsResponseProto> PARSER =
7962            new com.google.protobuf.AbstractParser<PurgeLogsResponseProto>() {
7963          public PurgeLogsResponseProto parsePartialFrom(
7964              com.google.protobuf.CodedInputStream input,
7965              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
7966              throws com.google.protobuf.InvalidProtocolBufferException {
7967            return new PurgeLogsResponseProto(input, extensionRegistry);
7968          }
7969        };
7970    
7971        @java.lang.Override
7972        public com.google.protobuf.Parser<PurgeLogsResponseProto> getParserForType() {
7973          return PARSER;
7974        }
7975    
7976        private void initFields() {
7977        }
7978        private byte memoizedIsInitialized = -1;
7979        public final boolean isInitialized() {
7980          byte isInitialized = memoizedIsInitialized;
7981          if (isInitialized != -1) return isInitialized == 1;
7982    
7983          memoizedIsInitialized = 1;
7984          return true;
7985        }
7986    
7987        public void writeTo(com.google.protobuf.CodedOutputStream output)
7988                            throws java.io.IOException {
7989          getSerializedSize();
7990          getUnknownFields().writeTo(output);
7991        }
7992    
7993        private int memoizedSerializedSize = -1;
7994        public int getSerializedSize() {
7995          int size = memoizedSerializedSize;
7996          if (size != -1) return size;
7997    
7998          size = 0;
7999          size += getUnknownFields().getSerializedSize();
8000          memoizedSerializedSize = size;
8001          return size;
8002        }
8003    
8004        private static final long serialVersionUID = 0L;
8005        @java.lang.Override
8006        protected java.lang.Object writeReplace()
8007            throws java.io.ObjectStreamException {
8008          return super.writeReplace();
8009        }
8010    
8011        @java.lang.Override
8012        public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
8016          if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto)) {
8017            return super.equals(obj);
8018          }
8019          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto) obj;
8020    
8021          boolean result = true;
8022          result = result &&
8023              getUnknownFields().equals(other.getUnknownFields());
8024          return result;
8025        }
8026    
8027        private int memoizedHashCode = 0;
8028        @java.lang.Override
8029        public int hashCode() {
8030          if (memoizedHashCode != 0) {
8031            return memoizedHashCode;
8032          }
8033          int hash = 41;
8034          hash = (19 * hash) + getDescriptorForType().hashCode();
8035          hash = (29 * hash) + getUnknownFields().hashCode();
8036          memoizedHashCode = hash;
8037          return hash;
8038        }
8039    
8040        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto parseFrom(
8041            com.google.protobuf.ByteString data)
8042            throws com.google.protobuf.InvalidProtocolBufferException {
8043          return PARSER.parseFrom(data);
8044        }
8045        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto parseFrom(
8046            com.google.protobuf.ByteString data,
8047            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
8048            throws com.google.protobuf.InvalidProtocolBufferException {
8049          return PARSER.parseFrom(data, extensionRegistry);
8050        }
8051        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto parseFrom(byte[] data)
8052            throws com.google.protobuf.InvalidProtocolBufferException {
8053          return PARSER.parseFrom(data);
8054        }
8055        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto parseFrom(
8056            byte[] data,
8057            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
8058            throws com.google.protobuf.InvalidProtocolBufferException {
8059          return PARSER.parseFrom(data, extensionRegistry);
8060        }
8061        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto parseFrom(java.io.InputStream input)
8062            throws java.io.IOException {
8063          return PARSER.parseFrom(input);
8064        }
8065        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto parseFrom(
8066            java.io.InputStream input,
8067            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
8068            throws java.io.IOException {
8069          return PARSER.parseFrom(input, extensionRegistry);
8070        }
8071        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto parseDelimitedFrom(java.io.InputStream input)
8072            throws java.io.IOException {
8073          return PARSER.parseDelimitedFrom(input);
8074        }
8075        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto parseDelimitedFrom(
8076            java.io.InputStream input,
8077            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
8078            throws java.io.IOException {
8079          return PARSER.parseDelimitedFrom(input, extensionRegistry);
8080        }
8081        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto parseFrom(
8082            com.google.protobuf.CodedInputStream input)
8083            throws java.io.IOException {
8084          return PARSER.parseFrom(input);
8085        }
8086        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto parseFrom(
8087            com.google.protobuf.CodedInputStream input,
8088            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
8089            throws java.io.IOException {
8090          return PARSER.parseFrom(input, extensionRegistry);
8091        }
8092    
8093        public static Builder newBuilder() { return Builder.create(); }
8094        public Builder newBuilderForType() { return newBuilder(); }
8095        public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto prototype) {
8096          return newBuilder().mergeFrom(prototype);
8097        }
8098        public Builder toBuilder() { return newBuilder(this); }
8099    
8100        @java.lang.Override
8101        protected Builder newBuilderForType(
8102            com.google.protobuf.GeneratedMessage.BuilderParent parent) {
8103          Builder builder = new Builder(parent);
8104          return builder;
8105        }
8106        /**
8107         * Protobuf type {@code hadoop.hdfs.qjournal.PurgeLogsResponseProto}
8108         */
8109        public static final class Builder extends
8110            com.google.protobuf.GeneratedMessage.Builder<Builder>
8111           implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProtoOrBuilder {
8112          public static final com.google.protobuf.Descriptors.Descriptor
8113              getDescriptor() {
8114            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_PurgeLogsResponseProto_descriptor;
8115          }
8116    
8117          protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
8118              internalGetFieldAccessorTable() {
8119            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_PurgeLogsResponseProto_fieldAccessorTable
8120                .ensureFieldAccessorsInitialized(
8121                    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto.Builder.class);
8122          }
8123    
8124          // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto.newBuilder()
8125          private Builder() {
8126            maybeForceBuilderInitialization();
8127          }
8128    
8129          private Builder(
8130              com.google.protobuf.GeneratedMessage.BuilderParent parent) {
8131            super(parent);
8132            maybeForceBuilderInitialization();
8133          }
8134          private void maybeForceBuilderInitialization() {
8135            if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
8136            }
8137          }
8138          private static Builder create() {
8139            return new Builder();
8140          }
8141    
8142          public Builder clear() {
8143            super.clear();
8144            return this;
8145          }
8146    
8147          public Builder clone() {
8148            return create().mergeFrom(buildPartial());
8149          }
8150    
8151          public com.google.protobuf.Descriptors.Descriptor
8152              getDescriptorForType() {
8153            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_PurgeLogsResponseProto_descriptor;
8154          }
8155    
8156          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto getDefaultInstanceForType() {
8157            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto.getDefaultInstance();
8158          }
8159    
8160          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto build() {
8161            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto result = buildPartial();
8162            if (!result.isInitialized()) {
8163              throw newUninitializedMessageException(result);
8164            }
8165            return result;
8166          }
8167    
8168          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto buildPartial() {
8169            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto(this);
8170            onBuilt();
8171            return result;
8172          }
8173    
8174          public Builder mergeFrom(com.google.protobuf.Message other) {
8175            if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto) {
8176              return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto)other);
8177            } else {
8178              super.mergeFrom(other);
8179              return this;
8180            }
8181          }
8182    
8183          public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto other) {
8184            if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto.getDefaultInstance()) return this;
8185            this.mergeUnknownFields(other.getUnknownFields());
8186            return this;
8187          }
8188    
8189          public final boolean isInitialized() {
8190            return true;
8191          }
8192    
8193          public Builder mergeFrom(
8194              com.google.protobuf.CodedInputStream input,
8195              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
8196              throws java.io.IOException {
8197            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto parsedMessage = null;
8198            try {
8199              parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
8200            } catch (com.google.protobuf.InvalidProtocolBufferException e) {
8201              parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto) e.getUnfinishedMessage();
8202              throw e;
8203            } finally {
8204              if (parsedMessage != null) {
8205                mergeFrom(parsedMessage);
8206              }
8207            }
8208            return this;
8209          }
8210    
8211          // @@protoc_insertion_point(builder_scope:hadoop.hdfs.qjournal.PurgeLogsResponseProto)
8212        }
8213    
8214        static {
8215          defaultInstance = new PurgeLogsResponseProto(true);
8216          defaultInstance.initFields();
8217        }
8218    
8219        // @@protoc_insertion_point(class_scope:hadoop.hdfs.qjournal.PurgeLogsResponseProto)
8220      }
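  // Editorial note: PurgeLogsResponseProto declares no fields, so a freshly
  // built instance serializes to zero bytes; only unknown fields copied from
  // a remote peer would contribute to getSerializedSize().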
8221    
8222      public interface IsFormattedRequestProtoOrBuilder
8223          extends com.google.protobuf.MessageOrBuilder {
8224    
8225        // required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;
8226        /**
8227         * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
8228         */
8229        boolean hasJid();
8230        /**
8231         * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
8232         */
8233        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid();
8234        /**
8235         * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
8236         */
8237        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder();
8238      }
8239      /**
8240       * Protobuf type {@code hadoop.hdfs.qjournal.IsFormattedRequestProto}
8241       *
8242       * <pre>
8244       * isFormatted()
8245       * </pre>
8246       */
8247      public static final class IsFormattedRequestProto extends
8248          com.google.protobuf.GeneratedMessage
8249          implements IsFormattedRequestProtoOrBuilder {
8250        // Use IsFormattedRequestProto.newBuilder() to construct.
8251        private IsFormattedRequestProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
8252          super(builder);
8253          this.unknownFields = builder.getUnknownFields();
8254        }
8255        private IsFormattedRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
8256    
8257        private static final IsFormattedRequestProto defaultInstance;
8258        public static IsFormattedRequestProto getDefaultInstance() {
8259          return defaultInstance;
8260        }
8261    
8262        public IsFormattedRequestProto getDefaultInstanceForType() {
8263          return defaultInstance;
8264        }
8265    
8266        private final com.google.protobuf.UnknownFieldSet unknownFields;
8267        @java.lang.Override
8268        public final com.google.protobuf.UnknownFieldSet
8269            getUnknownFields() {
8270          return this.unknownFields;
8271        }
8272        private IsFormattedRequestProto(
8273            com.google.protobuf.CodedInputStream input,
8274            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
8275            throws com.google.protobuf.InvalidProtocolBufferException {
8276          initFields();
8277          int mutable_bitField0_ = 0;
8278          com.google.protobuf.UnknownFieldSet.Builder unknownFields =
8279              com.google.protobuf.UnknownFieldSet.newBuilder();
8280          try {
8281            boolean done = false;
8282            while (!done) {
8283              int tag = input.readTag();
8284              switch (tag) {
8285                case 0:
8286                  done = true;
8287                  break;
8288                default: {
8289                  if (!parseUnknownField(input, unknownFields,
8290                                         extensionRegistry, tag)) {
8291                    done = true;
8292                  }
8293                  break;
8294                }
8295                case 10: {
8296                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder subBuilder = null;
8297                  if (((bitField0_ & 0x00000001) == 0x00000001)) {
8298                    subBuilder = jid_.toBuilder();
8299                  }
8300                  jid_ = input.readMessage(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.PARSER, extensionRegistry);
8301                  if (subBuilder != null) {
8302                    subBuilder.mergeFrom(jid_);
8303                    jid_ = subBuilder.buildPartial();
8304                  }
8305                  bitField0_ |= 0x00000001;
8306                  break;
8307                }
8308              }
8309            }
8310          } catch (com.google.protobuf.InvalidProtocolBufferException e) {
8311            throw e.setUnfinishedMessage(this);
8312          } catch (java.io.IOException e) {
8313            throw new com.google.protobuf.InvalidProtocolBufferException(
8314                e.getMessage()).setUnfinishedMessage(this);
8315          } finally {
8316            this.unknownFields = unknownFields.build();
8317            makeExtensionsImmutable();
8318          }
8319        }
8320        public static final com.google.protobuf.Descriptors.Descriptor
8321            getDescriptor() {
8322          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_IsFormattedRequestProto_descriptor;
8323        }
8324    
8325        protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
8326            internalGetFieldAccessorTable() {
8327          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_IsFormattedRequestProto_fieldAccessorTable
8328              .ensureFieldAccessorsInitialized(
8329                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto.Builder.class);
8330        }
8331    
8332        public static com.google.protobuf.Parser<IsFormattedRequestProto> PARSER =
8333            new com.google.protobuf.AbstractParser<IsFormattedRequestProto>() {
8334          public IsFormattedRequestProto parsePartialFrom(
8335              com.google.protobuf.CodedInputStream input,
8336              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
8337              throws com.google.protobuf.InvalidProtocolBufferException {
8338            return new IsFormattedRequestProto(input, extensionRegistry);
8339          }
8340        };
8341    
8342        @java.lang.Override
8343        public com.google.protobuf.Parser<IsFormattedRequestProto> getParserForType() {
8344          return PARSER;
8345        }
8346    
8347        private int bitField0_;
8348        // required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;
8349        public static final int JID_FIELD_NUMBER = 1;
8350        private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto jid_;
8351        /**
8352         * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
8353         */
8354        public boolean hasJid() {
8355          return ((bitField0_ & 0x00000001) == 0x00000001);
8356        }
8357        /**
8358         * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
8359         */
8360        public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid() {
8361          return jid_;
8362        }
8363        /**
8364         * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
8365         */
8366        public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder() {
8367          return jid_;
8368        }
8369    
8370        private void initFields() {
8371          jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
8372        }
8373        private byte memoizedIsInitialized = -1;
8374        public final boolean isInitialized() {
8375          byte isInitialized = memoizedIsInitialized;
8376          if (isInitialized != -1) return isInitialized == 1;
8377    
8378          if (!hasJid()) {
8379            memoizedIsInitialized = 0;
8380            return false;
8381          }
8382          if (!getJid().isInitialized()) {
8383            memoizedIsInitialized = 0;
8384            return false;
8385          }
8386          memoizedIsInitialized = 1;
8387          return true;
8388        }
8389    
8390        public void writeTo(com.google.protobuf.CodedOutputStream output)
8391                            throws java.io.IOException {
8392          getSerializedSize();
8393          if (((bitField0_ & 0x00000001) == 0x00000001)) {
8394            output.writeMessage(1, jid_);
8395          }
8396          getUnknownFields().writeTo(output);
8397        }
8398    
8399        private int memoizedSerializedSize = -1;
8400        public int getSerializedSize() {
8401          int size = memoizedSerializedSize;
8402          if (size != -1) return size;
8403    
8404          size = 0;
8405          if (((bitField0_ & 0x00000001) == 0x00000001)) {
8406            size += com.google.protobuf.CodedOutputStream
8407              .computeMessageSize(1, jid_);
8408          }
8409          size += getUnknownFields().getSerializedSize();
8410          memoizedSerializedSize = size;
8411          return size;
8412        }
8413    
8414        private static final long serialVersionUID = 0L;
8415        @java.lang.Override
8416        protected java.lang.Object writeReplace()
8417            throws java.io.ObjectStreamException {
8418          return super.writeReplace();
8419        }
8420    
8421        @java.lang.Override
8422        public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
8426          if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto)) {
8427            return super.equals(obj);
8428          }
8429          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto) obj;
8430    
8431          boolean result = true;
8432          result = result && (hasJid() == other.hasJid());
8433          if (hasJid()) {
8434            result = result && getJid()
8435                .equals(other.getJid());
8436          }
8437          result = result &&
8438              getUnknownFields().equals(other.getUnknownFields());
8439          return result;
8440        }
8441    
8442        private int memoizedHashCode = 0;
8443        @java.lang.Override
8444        public int hashCode() {
8445          if (memoizedHashCode != 0) {
8446            return memoizedHashCode;
8447          }
8448          int hash = 41;
8449          hash = (19 * hash) + getDescriptorForType().hashCode();
8450          if (hasJid()) {
8451            hash = (37 * hash) + JID_FIELD_NUMBER;
8452            hash = (53 * hash) + getJid().hashCode();
8453          }
8454          hash = (29 * hash) + getUnknownFields().hashCode();
8455          memoizedHashCode = hash;
8456          return hash;
8457        }
8458    
8459        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto parseFrom(
8460            com.google.protobuf.ByteString data)
8461            throws com.google.protobuf.InvalidProtocolBufferException {
8462          return PARSER.parseFrom(data);
8463        }
8464        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto parseFrom(
8465            com.google.protobuf.ByteString data,
8466            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
8467            throws com.google.protobuf.InvalidProtocolBufferException {
8468          return PARSER.parseFrom(data, extensionRegistry);
8469        }
8470        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto parseFrom(byte[] data)
8471            throws com.google.protobuf.InvalidProtocolBufferException {
8472          return PARSER.parseFrom(data);
8473        }
8474        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto parseFrom(
8475            byte[] data,
8476            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
8477            throws com.google.protobuf.InvalidProtocolBufferException {
8478          return PARSER.parseFrom(data, extensionRegistry);
8479        }
8480        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto parseFrom(java.io.InputStream input)
8481            throws java.io.IOException {
8482          return PARSER.parseFrom(input);
8483        }
8484        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto parseFrom(
8485            java.io.InputStream input,
8486            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
8487            throws java.io.IOException {
8488          return PARSER.parseFrom(input, extensionRegistry);
8489        }
8490        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto parseDelimitedFrom(java.io.InputStream input)
8491            throws java.io.IOException {
8492          return PARSER.parseDelimitedFrom(input);
8493        }
8494        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto parseDelimitedFrom(
8495            java.io.InputStream input,
8496            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
8497            throws java.io.IOException {
8498          return PARSER.parseDelimitedFrom(input, extensionRegistry);
8499        }
8500        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto parseFrom(
8501            com.google.protobuf.CodedInputStream input)
8502            throws java.io.IOException {
8503          return PARSER.parseFrom(input);
8504        }
8505        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto parseFrom(
8506            com.google.protobuf.CodedInputStream input,
8507            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
8508            throws java.io.IOException {
8509          return PARSER.parseFrom(input, extensionRegistry);
8510        }
8511    
8512        public static Builder newBuilder() { return Builder.create(); }
8513        public Builder newBuilderForType() { return newBuilder(); }
8514        public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto prototype) {
8515          return newBuilder().mergeFrom(prototype);
8516        }
8517        public Builder toBuilder() { return newBuilder(this); }
8518    
8519        @java.lang.Override
8520        protected Builder newBuilderForType(
8521            com.google.protobuf.GeneratedMessage.BuilderParent parent) {
8522          Builder builder = new Builder(parent);
8523          return builder;
8524        }
8525        /**
8526         * Protobuf type {@code hadoop.hdfs.qjournal.IsFormattedRequestProto}
8527         *
8528         * <pre>
8529         **
8530         * isFormatted()
8531         * </pre>
8532         */
8533        public static final class Builder extends
8534            com.google.protobuf.GeneratedMessage.Builder<Builder>
8535           implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProtoOrBuilder {
8536          public static final com.google.protobuf.Descriptors.Descriptor
8537              getDescriptor() {
8538            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_IsFormattedRequestProto_descriptor;
8539          }
8540    
8541          protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
8542              internalGetFieldAccessorTable() {
8543            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_IsFormattedRequestProto_fieldAccessorTable
8544                .ensureFieldAccessorsInitialized(
8545                    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto.Builder.class);
8546          }
8547    
8548          // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto.newBuilder()
8549          private Builder() {
8550            maybeForceBuilderInitialization();
8551          }
8552    
8553          private Builder(
8554              com.google.protobuf.GeneratedMessage.BuilderParent parent) {
8555            super(parent);
8556            maybeForceBuilderInitialization();
8557          }
8558          private void maybeForceBuilderInitialization() {
8559            if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
8560              getJidFieldBuilder();
8561            }
8562          }
8563          private static Builder create() {
8564            return new Builder();
8565          }
8566    
8567          public Builder clear() {
8568            super.clear();
8569            if (jidBuilder_ == null) {
8570              jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
8571            } else {
8572              jidBuilder_.clear();
8573            }
8574            bitField0_ = (bitField0_ & ~0x00000001);
8575            return this;
8576          }
8577    
8578          public Builder clone() {
8579            return create().mergeFrom(buildPartial());
8580          }
8581    
8582          public com.google.protobuf.Descriptors.Descriptor
8583              getDescriptorForType() {
8584            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_IsFormattedRequestProto_descriptor;
8585          }
8586    
8587          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto getDefaultInstanceForType() {
8588            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto.getDefaultInstance();
8589          }
8590    
8591          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto build() {
8592            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto result = buildPartial();
8593            if (!result.isInitialized()) {
8594              throw newUninitializedMessageException(result);
8595            }
8596            return result;
8597          }
8598    
8599          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto buildPartial() {
8600            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto(this);
8601            int from_bitField0_ = bitField0_;
8602            int to_bitField0_ = 0;
8603            if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
8604              to_bitField0_ |= 0x00000001;
8605            }
8606            if (jidBuilder_ == null) {
8607              result.jid_ = jid_;
8608            } else {
8609              result.jid_ = jidBuilder_.build();
8610            }
8611            result.bitField0_ = to_bitField0_;
8612            onBuilt();
8613            return result;
8614          }
8615    
8616          public Builder mergeFrom(com.google.protobuf.Message other) {
8617            if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto) {
8618              return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto)other);
8619            } else {
8620              super.mergeFrom(other);
8621              return this;
8622            }
8623          }
8624    
8625          public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto other) {
8626            if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto.getDefaultInstance()) return this;
8627            if (other.hasJid()) {
8628              mergeJid(other.getJid());
8629            }
8630            this.mergeUnknownFields(other.getUnknownFields());
8631            return this;
8632          }
8633    
8634          public final boolean isInitialized() {
        if (!hasJid()) {
          return false;
        }
        if (!getJid().isInitialized()) {
          return false;
        }
8643            return true;
8644          }
8645    
8646          public Builder mergeFrom(
8647              com.google.protobuf.CodedInputStream input,
8648              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
8649              throws java.io.IOException {
8650            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto parsedMessage = null;
8651            try {
8652              parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
8653            } catch (com.google.protobuf.InvalidProtocolBufferException e) {
8654              parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto) e.getUnfinishedMessage();
8655              throw e;
8656            } finally {
8657              if (parsedMessage != null) {
8658                mergeFrom(parsedMessage);
8659              }
8660            }
8661            return this;
8662          }
8663          private int bitField0_;
8664    
8665          // required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;
8666          private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
8667          private com.google.protobuf.SingleFieldBuilder<
8668              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder> jidBuilder_;
8669          /**
8670           * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
8671           */
8672          public boolean hasJid() {
8673            return ((bitField0_ & 0x00000001) == 0x00000001);
8674          }
8675          /**
8676           * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
8677           */
8678          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid() {
8679            if (jidBuilder_ == null) {
8680              return jid_;
8681            } else {
8682              return jidBuilder_.getMessage();
8683            }
8684          }
8685          /**
8686           * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
8687           */
8688          public Builder setJid(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto value) {
8689            if (jidBuilder_ == null) {
8690              if (value == null) {
8691                throw new NullPointerException();
8692              }
8693              jid_ = value;
8694              onChanged();
8695            } else {
8696              jidBuilder_.setMessage(value);
8697            }
8698            bitField0_ |= 0x00000001;
8699            return this;
8700          }
8701          /**
8702           * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
8703           */
8704          public Builder setJid(
8705              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder builderForValue) {
8706            if (jidBuilder_ == null) {
8707              jid_ = builderForValue.build();
8708              onChanged();
8709            } else {
8710              jidBuilder_.setMessage(builderForValue.build());
8711            }
8712            bitField0_ |= 0x00000001;
8713            return this;
8714          }
8715          /**
8716           * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
8717           */
8718          public Builder mergeJid(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto value) {
8719            if (jidBuilder_ == null) {
8720              if (((bitField0_ & 0x00000001) == 0x00000001) &&
8721                  jid_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance()) {
8722                jid_ =
8723                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.newBuilder(jid_).mergeFrom(value).buildPartial();
8724              } else {
8725                jid_ = value;
8726              }
8727              onChanged();
8728            } else {
8729              jidBuilder_.mergeFrom(value);
8730            }
8731            bitField0_ |= 0x00000001;
8732            return this;
8733          }
8734          /**
8735           * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
8736           */
8737          public Builder clearJid() {
8738            if (jidBuilder_ == null) {
8739              jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
8740              onChanged();
8741            } else {
8742              jidBuilder_.clear();
8743            }
8744            bitField0_ = (bitField0_ & ~0x00000001);
8745            return this;
8746          }
8747          /**
8748           * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
8749           */
8750          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder getJidBuilder() {
8751            bitField0_ |= 0x00000001;
8752            onChanged();
8753            return getJidFieldBuilder().getBuilder();
8754          }
8755          /**
8756           * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
8757           */
8758          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder() {
8759            if (jidBuilder_ != null) {
8760              return jidBuilder_.getMessageOrBuilder();
8761            } else {
8762              return jid_;
8763            }
8764          }
8765          /**
8766           * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
8767           */
8768          private com.google.protobuf.SingleFieldBuilder<
8769              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder> 
8770              getJidFieldBuilder() {
8771            if (jidBuilder_ == null) {
8772              jidBuilder_ = new com.google.protobuf.SingleFieldBuilder<
8773                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder>(
8774                      jid_,
8775                      getParentForChildren(),
8776                      isClean());
8777              jid_ = null;
8778            }
8779            return jidBuilder_;
8780          }
8781    
8782          // @@protoc_insertion_point(builder_scope:hadoop.hdfs.qjournal.IsFormattedRequestProto)
8783        }
8784    
8785        static {
8786          defaultInstance = new IsFormattedRequestProto(true);
8787          defaultInstance.initFields();
8788        }
8789    
8790        // @@protoc_insertion_point(class_scope:hadoop.hdfs.qjournal.IsFormattedRequestProto)
8791      }
8792    
8793      public interface IsFormattedResponseProtoOrBuilder
8794          extends com.google.protobuf.MessageOrBuilder {
8795    
8796        // required bool isFormatted = 1;
8797        /**
8798         * <code>required bool isFormatted = 1;</code>
8799         */
8800        boolean hasIsFormatted();
8801        /**
8802         * <code>required bool isFormatted = 1;</code>
8803         */
8804        boolean getIsFormatted();
8805      }
8806      /**
8807       * Protobuf type {@code hadoop.hdfs.qjournal.IsFormattedResponseProto}
8808       */
8809      public static final class IsFormattedResponseProto extends
8810          com.google.protobuf.GeneratedMessage
8811          implements IsFormattedResponseProtoOrBuilder {
8812        // Use IsFormattedResponseProto.newBuilder() to construct.
8813        private IsFormattedResponseProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
8814          super(builder);
8815          this.unknownFields = builder.getUnknownFields();
8816        }
8817        private IsFormattedResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
8818    
8819        private static final IsFormattedResponseProto defaultInstance;
8820        public static IsFormattedResponseProto getDefaultInstance() {
8821          return defaultInstance;
8822        }
8823    
8824        public IsFormattedResponseProto getDefaultInstanceForType() {
8825          return defaultInstance;
8826        }
8827    
8828        private final com.google.protobuf.UnknownFieldSet unknownFields;
8829        @java.lang.Override
8830        public final com.google.protobuf.UnknownFieldSet
8831            getUnknownFields() {
8832          return this.unknownFields;
8833        }
8834        private IsFormattedResponseProto(
8835            com.google.protobuf.CodedInputStream input,
8836            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
8837            throws com.google.protobuf.InvalidProtocolBufferException {
8838          initFields();
8839          int mutable_bitField0_ = 0;
8840          com.google.protobuf.UnknownFieldSet.Builder unknownFields =
8841              com.google.protobuf.UnknownFieldSet.newBuilder();
8842          try {
8843            boolean done = false;
8844            while (!done) {
8845              int tag = input.readTag();
8846              switch (tag) {
8847                case 0:
8848                  done = true;
8849                  break;
8850                default: {
8851                  if (!parseUnknownField(input, unknownFields,
8852                                         extensionRegistry, tag)) {
8853                    done = true;
8854                  }
8855                  break;
8856                }
8857                case 8: {
8858                  bitField0_ |= 0x00000001;
8859                  isFormatted_ = input.readBool();
8860                  break;
8861                }
8862              }
8863            }
8864          } catch (com.google.protobuf.InvalidProtocolBufferException e) {
8865            throw e.setUnfinishedMessage(this);
8866          } catch (java.io.IOException e) {
8867            throw new com.google.protobuf.InvalidProtocolBufferException(
8868                e.getMessage()).setUnfinishedMessage(this);
8869          } finally {
8870            this.unknownFields = unknownFields.build();
8871            makeExtensionsImmutable();
8872          }
8873        }
8874        public static final com.google.protobuf.Descriptors.Descriptor
8875            getDescriptor() {
8876          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_IsFormattedResponseProto_descriptor;
8877        }
8878    
8879        protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
8880            internalGetFieldAccessorTable() {
8881          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_IsFormattedResponseProto_fieldAccessorTable
8882              .ensureFieldAccessorsInitialized(
8883                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto.Builder.class);
8884        }
8885    
8886        public static com.google.protobuf.Parser<IsFormattedResponseProto> PARSER =
8887            new com.google.protobuf.AbstractParser<IsFormattedResponseProto>() {
8888          public IsFormattedResponseProto parsePartialFrom(
8889              com.google.protobuf.CodedInputStream input,
8890              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
8891              throws com.google.protobuf.InvalidProtocolBufferException {
8892            return new IsFormattedResponseProto(input, extensionRegistry);
8893          }
8894        };
8895    
8896        @java.lang.Override
8897        public com.google.protobuf.Parser<IsFormattedResponseProto> getParserForType() {
8898          return PARSER;
8899        }
8900    
8901        private int bitField0_;
8902        // required bool isFormatted = 1;
8903        public static final int ISFORMATTED_FIELD_NUMBER = 1;
8904        private boolean isFormatted_;
8905        /**
8906         * <code>required bool isFormatted = 1;</code>
8907         */
8908        public boolean hasIsFormatted() {
8909          return ((bitField0_ & 0x00000001) == 0x00000001);
8910        }
8911        /**
8912         * <code>required bool isFormatted = 1;</code>
8913         */
8914        public boolean getIsFormatted() {
8915          return isFormatted_;
8916        }
8917    
8918        private void initFields() {
8919          isFormatted_ = false;
8920        }
8921        private byte memoizedIsInitialized = -1;
8922        public final boolean isInitialized() {
8923          byte isInitialized = memoizedIsInitialized;
8924          if (isInitialized != -1) return isInitialized == 1;
8925    
8926          if (!hasIsFormatted()) {
8927            memoizedIsInitialized = 0;
8928            return false;
8929          }
8930          memoizedIsInitialized = 1;
8931          return true;
8932        }
8933    
8934        public void writeTo(com.google.protobuf.CodedOutputStream output)
8935                            throws java.io.IOException {
8936          getSerializedSize();
8937          if (((bitField0_ & 0x00000001) == 0x00000001)) {
8938            output.writeBool(1, isFormatted_);
8939          }
8940          getUnknownFields().writeTo(output);
8941        }
8942    
8943        private int memoizedSerializedSize = -1;
8944        public int getSerializedSize() {
8945          int size = memoizedSerializedSize;
8946          if (size != -1) return size;
8947    
8948          size = 0;
8949          if (((bitField0_ & 0x00000001) == 0x00000001)) {
8950            size += com.google.protobuf.CodedOutputStream
8951              .computeBoolSize(1, isFormatted_);
8952          }
8953          size += getUnknownFields().getSerializedSize();
8954          memoizedSerializedSize = size;
8955          return size;
8956        }
8957    
8958        private static final long serialVersionUID = 0L;
8959        @java.lang.Override
8960        protected java.lang.Object writeReplace()
8961            throws java.io.ObjectStreamException {
8962          return super.writeReplace();
8963        }
8964    
8965        @java.lang.Override
8966        public boolean equals(final java.lang.Object obj) {
8967          if (obj == this) {
        return true;
8969          }
8970          if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto)) {
8971            return super.equals(obj);
8972          }
8973          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto) obj;
8974    
8975          boolean result = true;
8976          result = result && (hasIsFormatted() == other.hasIsFormatted());
8977          if (hasIsFormatted()) {
8978            result = result && (getIsFormatted()
8979                == other.getIsFormatted());
8980          }
8981          result = result &&
8982              getUnknownFields().equals(other.getUnknownFields());
8983          return result;
8984        }
8985    
8986        private int memoizedHashCode = 0;
8987        @java.lang.Override
8988        public int hashCode() {
8989          if (memoizedHashCode != 0) {
8990            return memoizedHashCode;
8991          }
8992          int hash = 41;
8993          hash = (19 * hash) + getDescriptorForType().hashCode();
8994          if (hasIsFormatted()) {
8995            hash = (37 * hash) + ISFORMATTED_FIELD_NUMBER;
8996            hash = (53 * hash) + hashBoolean(getIsFormatted());
8997          }
8998          hash = (29 * hash) + getUnknownFields().hashCode();
8999          memoizedHashCode = hash;
9000          return hash;
9001        }
9002    
9003        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto parseFrom(
9004            com.google.protobuf.ByteString data)
9005            throws com.google.protobuf.InvalidProtocolBufferException {
9006          return PARSER.parseFrom(data);
9007        }
9008        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto parseFrom(
9009            com.google.protobuf.ByteString data,
9010            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
9011            throws com.google.protobuf.InvalidProtocolBufferException {
9012          return PARSER.parseFrom(data, extensionRegistry);
9013        }
9014        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto parseFrom(byte[] data)
9015            throws com.google.protobuf.InvalidProtocolBufferException {
9016          return PARSER.parseFrom(data);
9017        }
9018        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto parseFrom(
9019            byte[] data,
9020            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
9021            throws com.google.protobuf.InvalidProtocolBufferException {
9022          return PARSER.parseFrom(data, extensionRegistry);
9023        }
9024        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto parseFrom(java.io.InputStream input)
9025            throws java.io.IOException {
9026          return PARSER.parseFrom(input);
9027        }
9028        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto parseFrom(
9029            java.io.InputStream input,
9030            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
9031            throws java.io.IOException {
9032          return PARSER.parseFrom(input, extensionRegistry);
9033        }
9034        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto parseDelimitedFrom(java.io.InputStream input)
9035            throws java.io.IOException {
9036          return PARSER.parseDelimitedFrom(input);
9037        }
9038        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto parseDelimitedFrom(
9039            java.io.InputStream input,
9040            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
9041            throws java.io.IOException {
9042          return PARSER.parseDelimitedFrom(input, extensionRegistry);
9043        }
9044        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto parseFrom(
9045            com.google.protobuf.CodedInputStream input)
9046            throws java.io.IOException {
9047          return PARSER.parseFrom(input);
9048        }
9049        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto parseFrom(
9050            com.google.protobuf.CodedInputStream input,
9051            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
9052            throws java.io.IOException {
9053          return PARSER.parseFrom(input, extensionRegistry);
9054        }
9055    
9056        public static Builder newBuilder() { return Builder.create(); }
9057        public Builder newBuilderForType() { return newBuilder(); }
9058        public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto prototype) {
9059          return newBuilder().mergeFrom(prototype);
9060        }
9061        public Builder toBuilder() { return newBuilder(this); }
9062    
9063        @java.lang.Override
9064        protected Builder newBuilderForType(
9065            com.google.protobuf.GeneratedMessage.BuilderParent parent) {
9066          Builder builder = new Builder(parent);
9067          return builder;
9068        }
9069        /**
9070         * Protobuf type {@code hadoop.hdfs.qjournal.IsFormattedResponseProto}
9071         */
9072        public static final class Builder extends
9073            com.google.protobuf.GeneratedMessage.Builder<Builder>
9074           implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProtoOrBuilder {
9075          public static final com.google.protobuf.Descriptors.Descriptor
9076              getDescriptor() {
9077            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_IsFormattedResponseProto_descriptor;
9078          }
9079    
9080          protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
9081              internalGetFieldAccessorTable() {
9082            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_IsFormattedResponseProto_fieldAccessorTable
9083                .ensureFieldAccessorsInitialized(
9084                    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto.Builder.class);
9085          }
9086    
9087          // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto.newBuilder()
9088          private Builder() {
9089            maybeForceBuilderInitialization();
9090          }
9091    
9092          private Builder(
9093              com.google.protobuf.GeneratedMessage.BuilderParent parent) {
9094            super(parent);
9095            maybeForceBuilderInitialization();
9096          }
9097          private void maybeForceBuilderInitialization() {
9098            if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
9099            }
9100          }
9101          private static Builder create() {
9102            return new Builder();
9103          }
9104    
9105          public Builder clear() {
9106            super.clear();
9107            isFormatted_ = false;
9108            bitField0_ = (bitField0_ & ~0x00000001);
9109            return this;
9110          }
9111    
9112          public Builder clone() {
9113            return create().mergeFrom(buildPartial());
9114          }
9115    
9116          public com.google.protobuf.Descriptors.Descriptor
9117              getDescriptorForType() {
9118            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_IsFormattedResponseProto_descriptor;
9119          }
9120    
9121          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto getDefaultInstanceForType() {
9122            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto.getDefaultInstance();
9123          }
9124    
9125          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto build() {
9126            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto result = buildPartial();
9127            if (!result.isInitialized()) {
9128              throw newUninitializedMessageException(result);
9129            }
9130            return result;
9131          }
9132    
9133          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto buildPartial() {
9134            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto(this);
9135            int from_bitField0_ = bitField0_;
9136            int to_bitField0_ = 0;
9137            if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
9138              to_bitField0_ |= 0x00000001;
9139            }
9140            result.isFormatted_ = isFormatted_;
9141            result.bitField0_ = to_bitField0_;
9142            onBuilt();
9143            return result;
9144          }
9145    
9146          public Builder mergeFrom(com.google.protobuf.Message other) {
9147            if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto) {
9148              return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto)other);
9149            } else {
9150              super.mergeFrom(other);
9151              return this;
9152            }
9153          }
9154    
9155          public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto other) {
9156            if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto.getDefaultInstance()) return this;
9157            if (other.hasIsFormatted()) {
9158              setIsFormatted(other.getIsFormatted());
9159            }
9160            this.mergeUnknownFields(other.getUnknownFields());
9161            return this;
9162          }
9163    
9164          public final boolean isInitialized() {
          if (!hasIsFormatted()) {
            return false;
          }
9169            return true;
9170          }
9171    
9172          public Builder mergeFrom(
9173              com.google.protobuf.CodedInputStream input,
9174              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
9175              throws java.io.IOException {
9176            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto parsedMessage = null;
9177            try {
9178              parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
9179            } catch (com.google.protobuf.InvalidProtocolBufferException e) {
9180              parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto) e.getUnfinishedMessage();
9181              throw e;
9182            } finally {
9183              if (parsedMessage != null) {
9184                mergeFrom(parsedMessage);
9185              }
9186            }
9187            return this;
9188          }
9189          private int bitField0_;
9190    
9191          // required bool isFormatted = 1;
          private boolean isFormatted_;
9193          /**
9194           * <code>required bool isFormatted = 1;</code>
9195           */
9196          public boolean hasIsFormatted() {
9197            return ((bitField0_ & 0x00000001) == 0x00000001);
9198          }
9199          /**
9200           * <code>required bool isFormatted = 1;</code>
9201           */
9202          public boolean getIsFormatted() {
9203            return isFormatted_;
9204          }
9205          /**
9206           * <code>required bool isFormatted = 1;</code>
9207           */
9208          public Builder setIsFormatted(boolean value) {
9209            bitField0_ |= 0x00000001;
9210            isFormatted_ = value;
9211            onChanged();
9212            return this;
9213          }
9214          /**
9215           * <code>required bool isFormatted = 1;</code>
9216           */
9217          public Builder clearIsFormatted() {
9218            bitField0_ = (bitField0_ & ~0x00000001);
9219            isFormatted_ = false;
9220            onChanged();
9221            return this;
9222          }
9223    
9224          // @@protoc_insertion_point(builder_scope:hadoop.hdfs.qjournal.IsFormattedResponseProto)
9225        }
9226    
9227        static {
9228          defaultInstance = new IsFormattedResponseProto(true);
9229          defaultInstance.initFields();
9230        }
9231    
9232        // @@protoc_insertion_point(class_scope:hadoop.hdfs.qjournal.IsFormattedResponseProto)
9233      }
9234    
9235      public interface DiscardSegmentsRequestProtoOrBuilder
9236          extends com.google.protobuf.MessageOrBuilder {
9237    
9238        // required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;
9239        /**
9240         * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
9241         */
9242        boolean hasJid();
9243        /**
9244         * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
9245         */
9246        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid();
9247        /**
9248         * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
9249         */
9250        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder();
9251    
9252        // required uint64 startTxId = 2;
9253        /**
9254         * <code>required uint64 startTxId = 2;</code>
9255         */
9256        boolean hasStartTxId();
9257        /**
9258         * <code>required uint64 startTxId = 2;</code>
9259         */
9260        long getStartTxId();
9261      }
9262      /**
9263       * Protobuf type {@code hadoop.hdfs.qjournal.DiscardSegmentsRequestProto}
9264       *
9265       * <pre>
9266       **
9267       * discardSegments()
9268       * </pre>
9269       */
9270      public static final class DiscardSegmentsRequestProto extends
9271          com.google.protobuf.GeneratedMessage
9272          implements DiscardSegmentsRequestProtoOrBuilder {
9273        // Use DiscardSegmentsRequestProto.newBuilder() to construct.
9274        private DiscardSegmentsRequestProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
9275          super(builder);
9276          this.unknownFields = builder.getUnknownFields();
9277        }
9278        private DiscardSegmentsRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
9279    
9280        private static final DiscardSegmentsRequestProto defaultInstance;
9281        public static DiscardSegmentsRequestProto getDefaultInstance() {
9282          return defaultInstance;
9283        }
9284    
9285        public DiscardSegmentsRequestProto getDefaultInstanceForType() {
9286          return defaultInstance;
9287        }
9288    
9289        private final com.google.protobuf.UnknownFieldSet unknownFields;
9290        @java.lang.Override
9291        public final com.google.protobuf.UnknownFieldSet
9292            getUnknownFields() {
9293          return this.unknownFields;
9294        }
9295        private DiscardSegmentsRequestProto(
9296            com.google.protobuf.CodedInputStream input,
9297            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
9298            throws com.google.protobuf.InvalidProtocolBufferException {
9299          initFields();
9300          int mutable_bitField0_ = 0;
9301          com.google.protobuf.UnknownFieldSet.Builder unknownFields =
9302              com.google.protobuf.UnknownFieldSet.newBuilder();
9303          try {
9304            boolean done = false;
9305            while (!done) {
9306              int tag = input.readTag();
9307              switch (tag) {
9308                case 0:
9309                  done = true;
9310                  break;
9311                default: {
9312                  if (!parseUnknownField(input, unknownFields,
9313                                         extensionRegistry, tag)) {
9314                    done = true;
9315                  }
9316                  break;
9317                }
9318                case 10: {
9319                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder subBuilder = null;
9320                  if (((bitField0_ & 0x00000001) == 0x00000001)) {
9321                    subBuilder = jid_.toBuilder();
9322                  }
9323                  jid_ = input.readMessage(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.PARSER, extensionRegistry);
9324                  if (subBuilder != null) {
9325                    subBuilder.mergeFrom(jid_);
9326                    jid_ = subBuilder.buildPartial();
9327                  }
9328                  bitField0_ |= 0x00000001;
9329                  break;
9330                }
9331                case 16: {
9332                  bitField0_ |= 0x00000002;
9333                  startTxId_ = input.readUInt64();
9334                  break;
9335                }
9336              }
9337            }
9338          } catch (com.google.protobuf.InvalidProtocolBufferException e) {
9339            throw e.setUnfinishedMessage(this);
9340          } catch (java.io.IOException e) {
9341            throw new com.google.protobuf.InvalidProtocolBufferException(
9342                e.getMessage()).setUnfinishedMessage(this);
9343          } finally {
9344            this.unknownFields = unknownFields.build();
9345            makeExtensionsImmutable();
9346          }
9347        }
9348        public static final com.google.protobuf.Descriptors.Descriptor
9349            getDescriptor() {
9350          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_DiscardSegmentsRequestProto_descriptor;
9351        }
9352    
9353        protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
9354            internalGetFieldAccessorTable() {
9355          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_DiscardSegmentsRequestProto_fieldAccessorTable
9356              .ensureFieldAccessorsInitialized(
9357                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsRequestProto.Builder.class);
9358        }
9359    
9360        public static com.google.protobuf.Parser<DiscardSegmentsRequestProto> PARSER =
9361            new com.google.protobuf.AbstractParser<DiscardSegmentsRequestProto>() {
9362          public DiscardSegmentsRequestProto parsePartialFrom(
9363              com.google.protobuf.CodedInputStream input,
9364              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
9365              throws com.google.protobuf.InvalidProtocolBufferException {
9366            return new DiscardSegmentsRequestProto(input, extensionRegistry);
9367          }
9368        };
9369    
9370        @java.lang.Override
9371        public com.google.protobuf.Parser<DiscardSegmentsRequestProto> getParserForType() {
9372          return PARSER;
9373        }
9374    
9375        private int bitField0_;
9376        // required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;
9377        public static final int JID_FIELD_NUMBER = 1;
9378        private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto jid_;
9379        /**
9380         * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
9381         */
9382        public boolean hasJid() {
9383          return ((bitField0_ & 0x00000001) == 0x00000001);
9384        }
9385        /**
9386         * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
9387         */
9388        public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid() {
9389          return jid_;
9390        }
9391        /**
9392         * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
9393         */
9394        public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder() {
9395          return jid_;
9396        }
9397    
9398        // required uint64 startTxId = 2;
9399        public static final int STARTTXID_FIELD_NUMBER = 2;
9400        private long startTxId_;
9401        /**
9402         * <code>required uint64 startTxId = 2;</code>
9403         */
9404        public boolean hasStartTxId() {
9405          return ((bitField0_ & 0x00000002) == 0x00000002);
9406        }
9407        /**
9408         * <code>required uint64 startTxId = 2;</code>
9409         */
9410        public long getStartTxId() {
9411          return startTxId_;
9412        }
9413    
9414        private void initFields() {
9415          jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
9416          startTxId_ = 0L;
9417        }
9418        private byte memoizedIsInitialized = -1;
9419        public final boolean isInitialized() {
9420          byte isInitialized = memoizedIsInitialized;
9421          if (isInitialized != -1) return isInitialized == 1;
9422    
9423          if (!hasJid()) {
9424            memoizedIsInitialized = 0;
9425            return false;
9426          }
9427          if (!hasStartTxId()) {
9428            memoizedIsInitialized = 0;
9429            return false;
9430          }
9431          if (!getJid().isInitialized()) {
9432            memoizedIsInitialized = 0;
9433            return false;
9434          }
9435          memoizedIsInitialized = 1;
9436          return true;
9437        }
9438    
9439        public void writeTo(com.google.protobuf.CodedOutputStream output)
9440                            throws java.io.IOException {
9441          getSerializedSize();
9442          if (((bitField0_ & 0x00000001) == 0x00000001)) {
9443            output.writeMessage(1, jid_);
9444          }
9445          if (((bitField0_ & 0x00000002) == 0x00000002)) {
9446            output.writeUInt64(2, startTxId_);
9447          }
9448          getUnknownFields().writeTo(output);
9449        }
9450    
9451        private int memoizedSerializedSize = -1;
9452        public int getSerializedSize() {
9453          int size = memoizedSerializedSize;
9454          if (size != -1) return size;
9455    
9456          size = 0;
9457          if (((bitField0_ & 0x00000001) == 0x00000001)) {
9458            size += com.google.protobuf.CodedOutputStream
9459              .computeMessageSize(1, jid_);
9460          }
9461          if (((bitField0_ & 0x00000002) == 0x00000002)) {
9462            size += com.google.protobuf.CodedOutputStream
9463              .computeUInt64Size(2, startTxId_);
9464          }
9465          size += getUnknownFields().getSerializedSize();
9466          memoizedSerializedSize = size;
9467          return size;
9468        }
9469    
9470        private static final long serialVersionUID = 0L;
9471        @java.lang.Override
9472        protected java.lang.Object writeReplace()
9473            throws java.io.ObjectStreamException {
9474          return super.writeReplace();
9475        }
9476    
9477        @java.lang.Override
9478        public boolean equals(final java.lang.Object obj) {
9479          if (obj == this) {
        return true;
9481          }
9482          if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsRequestProto)) {
9483            return super.equals(obj);
9484          }
9485          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsRequestProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsRequestProto) obj;
9486    
9487          boolean result = true;
9488          result = result && (hasJid() == other.hasJid());
9489          if (hasJid()) {
9490            result = result && getJid()
9491                .equals(other.getJid());
9492          }
9493          result = result && (hasStartTxId() == other.hasStartTxId());
9494          if (hasStartTxId()) {
9495            result = result && (getStartTxId()
9496                == other.getStartTxId());
9497          }
9498          result = result &&
9499              getUnknownFields().equals(other.getUnknownFields());
9500          return result;
9501        }
9502    
9503        private int memoizedHashCode = 0;
9504        @java.lang.Override
9505        public int hashCode() {
9506          if (memoizedHashCode != 0) {
9507            return memoizedHashCode;
9508          }
9509          int hash = 41;
9510          hash = (19 * hash) + getDescriptorForType().hashCode();
9511          if (hasJid()) {
9512            hash = (37 * hash) + JID_FIELD_NUMBER;
9513            hash = (53 * hash) + getJid().hashCode();
9514          }
9515          if (hasStartTxId()) {
9516            hash = (37 * hash) + STARTTXID_FIELD_NUMBER;
9517            hash = (53 * hash) + hashLong(getStartTxId());
9518          }
9519          hash = (29 * hash) + getUnknownFields().hashCode();
9520          memoizedHashCode = hash;
9521          return hash;
9522        }
9523    
9524        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsRequestProto parseFrom(
9525            com.google.protobuf.ByteString data)
9526            throws com.google.protobuf.InvalidProtocolBufferException {
9527          return PARSER.parseFrom(data);
9528        }
9529        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsRequestProto parseFrom(
9530            com.google.protobuf.ByteString data,
9531            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
9532            throws com.google.protobuf.InvalidProtocolBufferException {
9533          return PARSER.parseFrom(data, extensionRegistry);
9534        }
9535        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsRequestProto parseFrom(byte[] data)
9536            throws com.google.protobuf.InvalidProtocolBufferException {
9537          return PARSER.parseFrom(data);
9538        }
9539        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsRequestProto parseFrom(
9540            byte[] data,
9541            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
9542            throws com.google.protobuf.InvalidProtocolBufferException {
9543          return PARSER.parseFrom(data, extensionRegistry);
9544        }
9545        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsRequestProto parseFrom(java.io.InputStream input)
9546            throws java.io.IOException {
9547          return PARSER.parseFrom(input);
9548        }
9549        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsRequestProto parseFrom(
9550            java.io.InputStream input,
9551            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
9552            throws java.io.IOException {
9553          return PARSER.parseFrom(input, extensionRegistry);
9554        }
9555        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsRequestProto parseDelimitedFrom(java.io.InputStream input)
9556            throws java.io.IOException {
9557          return PARSER.parseDelimitedFrom(input);
9558        }
9559        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsRequestProto parseDelimitedFrom(
9560            java.io.InputStream input,
9561            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
9562            throws java.io.IOException {
9563          return PARSER.parseDelimitedFrom(input, extensionRegistry);
9564        }
9565        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsRequestProto parseFrom(
9566            com.google.protobuf.CodedInputStream input)
9567            throws java.io.IOException {
9568          return PARSER.parseFrom(input);
9569        }
9570        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsRequestProto parseFrom(
9571            com.google.protobuf.CodedInputStream input,
9572            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
9573            throws java.io.IOException {
9574          return PARSER.parseFrom(input, extensionRegistry);
9575        }
9576    
9577        public static Builder newBuilder() { return Builder.create(); }
9578        public Builder newBuilderForType() { return newBuilder(); }
9579        public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsRequestProto prototype) {
9580          return newBuilder().mergeFrom(prototype);
9581        }
9582        public Builder toBuilder() { return newBuilder(this); }
9583    
9584        @java.lang.Override
9585        protected Builder newBuilderForType(
9586            com.google.protobuf.GeneratedMessage.BuilderParent parent) {
9587          Builder builder = new Builder(parent);
9588          return builder;
9589        }
9590        /**
9591         * Protobuf type {@code hadoop.hdfs.qjournal.DiscardSegmentsRequestProto}
9592         *
9593         * <pre>
9594         **
9595         * discardSegments()
9596         * </pre>
9597         */
9598        public static final class Builder extends
9599            com.google.protobuf.GeneratedMessage.Builder<Builder>
9600           implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsRequestProtoOrBuilder {
9601          public static final com.google.protobuf.Descriptors.Descriptor
9602              getDescriptor() {
9603            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_DiscardSegmentsRequestProto_descriptor;
9604          }
9605    
9606          protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
9607              internalGetFieldAccessorTable() {
9608            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_DiscardSegmentsRequestProto_fieldAccessorTable
9609                .ensureFieldAccessorsInitialized(
9610                    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsRequestProto.Builder.class);
9611          }
9612    
9613          // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsRequestProto.newBuilder()
9614          private Builder() {
9615            maybeForceBuilderInitialization();
9616          }
9617    
9618          private Builder(
9619              com.google.protobuf.GeneratedMessage.BuilderParent parent) {
9620            super(parent);
9621            maybeForceBuilderInitialization();
9622          }
9623          private void maybeForceBuilderInitialization() {
9624            if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
9625              getJidFieldBuilder();
9626            }
9627          }
9628          private static Builder create() {
9629            return new Builder();
9630          }
9631    
9632          public Builder clear() {
9633            super.clear();
9634            if (jidBuilder_ == null) {
9635              jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
9636            } else {
9637              jidBuilder_.clear();
9638            }
9639            bitField0_ = (bitField0_ & ~0x00000001);
9640            startTxId_ = 0L;
9641            bitField0_ = (bitField0_ & ~0x00000002);
9642            return this;
9643          }
9644    
9645          public Builder clone() {
9646            return create().mergeFrom(buildPartial());
9647          }
9648    
9649          public com.google.protobuf.Descriptors.Descriptor
9650              getDescriptorForType() {
9651            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_DiscardSegmentsRequestProto_descriptor;
9652          }
9653    
9654          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsRequestProto getDefaultInstanceForType() {
9655            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsRequestProto.getDefaultInstance();
9656          }
9657    
9658          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsRequestProto build() {
9659            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsRequestProto result = buildPartial();
9660            if (!result.isInitialized()) {
9661              throw newUninitializedMessageException(result);
9662            }
9663            return result;
9664          }
9665    
9666          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsRequestProto buildPartial() {
9667            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsRequestProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsRequestProto(this);
9668            int from_bitField0_ = bitField0_;
9669            int to_bitField0_ = 0;
9670            if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
9671              to_bitField0_ |= 0x00000001;
9672            }
9673            if (jidBuilder_ == null) {
9674              result.jid_ = jid_;
9675            } else {
9676              result.jid_ = jidBuilder_.build();
9677            }
9678            if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
9679              to_bitField0_ |= 0x00000002;
9680            }
9681            result.startTxId_ = startTxId_;
9682            result.bitField0_ = to_bitField0_;
9683            onBuilt();
9684            return result;
9685          }
9686    
9687          public Builder mergeFrom(com.google.protobuf.Message other) {
9688            if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsRequestProto) {
9689              return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsRequestProto)other);
9690            } else {
9691              super.mergeFrom(other);
9692              return this;
9693            }
9694          }
9695    
9696          public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsRequestProto other) {
9697            if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsRequestProto.getDefaultInstance()) return this;
9698            if (other.hasJid()) {
9699              mergeJid(other.getJid());
9700            }
9701            if (other.hasStartTxId()) {
9702              setStartTxId(other.getStartTxId());
9703            }
9704            this.mergeUnknownFields(other.getUnknownFields());
9705            return this;
9706          }
9707    
      public final boolean isInitialized() {
        // Both required fields must be set, and the nested jid message must
        // itself be fully initialized.
        if (!hasJid()) {
          return false;
        }
        if (!hasStartTxId()) {
          return false;
        }
        if (!getJid().isInitialized()) {
          return false;
        }
        return true;
      }
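      // Note for the stream-based mergeFrom below: when parsing fails, the
      // partially decoded message is still merged into this builder (see its
      // finally block), so any fields read before the error are retained
      // while the InvalidProtocolBufferException propagates to the caller.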

      public Builder mergeFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsRequestProto parsedMessage = null;
        try {
          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
          parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsRequestProto) e.getUnfinishedMessage();
          throw e;
        } finally {
          if (parsedMessage != null) {
            mergeFrom(parsedMessage);
          }
        }
        return this;
      }
      private int bitField0_;

      // required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;
      private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
      private com.google.protobuf.SingleFieldBuilder<
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder> jidBuilder_;
      /**
       * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
       */
      public boolean hasJid() {
        return ((bitField0_ & 0x00000001) == 0x00000001);
      }
      /**
       * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
       */
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid() {
        if (jidBuilder_ == null) {
          return jid_;
        } else {
          return jidBuilder_.getMessage();
        }
      }
      /**
       * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
       */
      public Builder setJid(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto value) {
        if (jidBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          jid_ = value;
          onChanged();
        } else {
          jidBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00000001;
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
       */
      public Builder setJid(
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder builderForValue) {
        if (jidBuilder_ == null) {
          jid_ = builderForValue.build();
          onChanged();
        } else {
          jidBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000001;
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
       */
      public Builder mergeJid(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto value) {
        if (jidBuilder_ == null) {
          if (((bitField0_ & 0x00000001) == 0x00000001) &&
              jid_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance()) {
            jid_ =
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.newBuilder(jid_).mergeFrom(value).buildPartial();
          } else {
            jid_ = value;
          }
          onChanged();
        } else {
          jidBuilder_.mergeFrom(value);
        }
        bitField0_ |= 0x00000001;
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
       */
      public Builder clearJid() {
        if (jidBuilder_ == null) {
          jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
          onChanged();
        } else {
          jidBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000001);
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
       */
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder getJidBuilder() {
        bitField0_ |= 0x00000001;
        onChanged();
        return getJidFieldBuilder().getBuilder();
      }
      /**
       * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
       */
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder() {
        if (jidBuilder_ != null) {
          return jidBuilder_.getMessageOrBuilder();
        } else {
          return jid_;
        }
      }
      /**
       * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
       */
      private com.google.protobuf.SingleFieldBuilder<
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder>
          getJidFieldBuilder() {
        if (jidBuilder_ == null) {
          jidBuilder_ = new com.google.protobuf.SingleFieldBuilder<
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder>(
                  jid_,
                  getParentForChildren(),
                  isClean());
          jid_ = null;
        }
        return jidBuilder_;
      }
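      // jid is materialized lazily: the accessors above use the plain jid_
      // field until getJidFieldBuilder() is first called, at which point a
      // SingleFieldBuilder takes ownership and jid_ is nulled out. That is
      // why every jid accessor branches on (jidBuilder_ == null).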

      // required uint64 startTxId = 2;
      private long startTxId_ ;
      /**
       * <code>required uint64 startTxId = 2;</code>
       */
      public boolean hasStartTxId() {
        return ((bitField0_ & 0x00000002) == 0x00000002);
      }
      /**
       * <code>required uint64 startTxId = 2;</code>
       */
      public long getStartTxId() {
        return startTxId_;
      }
      /**
       * <code>required uint64 startTxId = 2;</code>
       */
      public Builder setStartTxId(long value) {
        bitField0_ |= 0x00000002;
        startTxId_ = value;
        onChanged();
        return this;
      }
      /**
       * <code>required uint64 startTxId = 2;</code>
       */
      public Builder clearStartTxId() {
        bitField0_ = (bitField0_ & ~0x00000002);
        startTxId_ = 0L;
        onChanged();
        return this;
      }

      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.qjournal.DiscardSegmentsRequestProto)
    }

    static {
      defaultInstance = new DiscardSegmentsRequestProto(true);
      defaultInstance.initFields();
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.qjournal.DiscardSegmentsRequestProto)
  }
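  // Illustrative sketch (not part of the generated file): a typical build /
  // serialize / parse round trip for DiscardSegmentsRequestProto. The
  // setIdentifier(String) setter on JournalIdProto.Builder is assumed here
  // from the standard protoc pattern for a required string field.
  //
  //   DiscardSegmentsRequestProto req = DiscardSegmentsRequestProto.newBuilder()
  //       .setJid(JournalIdProto.newBuilder().setIdentifier("journal-1"))
  //       .setStartTxId(42L)
  //       .build();                     // throws if a required field is unset
  //   byte[] wire = req.toByteArray();  // standard protobuf wire encoding
  //   DiscardSegmentsRequestProto parsed =
  //       DiscardSegmentsRequestProto.parseFrom(wire);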

  public interface DiscardSegmentsResponseProtoOrBuilder
      extends com.google.protobuf.MessageOrBuilder {
  }
  /**
   * Protobuf type {@code hadoop.hdfs.qjournal.DiscardSegmentsResponseProto}
   */
  public static final class DiscardSegmentsResponseProto extends
      com.google.protobuf.GeneratedMessage
      implements DiscardSegmentsResponseProtoOrBuilder {
    // Use DiscardSegmentsResponseProto.newBuilder() to construct.
    private DiscardSegmentsResponseProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
      super(builder);
      this.unknownFields = builder.getUnknownFields();
    }
    private DiscardSegmentsResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }

    private static final DiscardSegmentsResponseProto defaultInstance;
    public static DiscardSegmentsResponseProto getDefaultInstance() {
      return defaultInstance;
    }

    public DiscardSegmentsResponseProto getDefaultInstanceForType() {
      return defaultInstance;
    }

    private final com.google.protobuf.UnknownFieldSet unknownFields;
    @java.lang.Override
    public final com.google.protobuf.UnknownFieldSet
        getUnknownFields() {
      return this.unknownFields;
    }
    private DiscardSegmentsResponseProto(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      initFields();
      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
          com.google.protobuf.UnknownFieldSet.newBuilder();
      try {
        boolean done = false;
        while (!done) {
          int tag = input.readTag();
          switch (tag) {
            case 0:
              done = true;
              break;
            default: {
              if (!parseUnknownField(input, unknownFields,
                                     extensionRegistry, tag)) {
                done = true;
              }
              break;
            }
          }
        }
      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
        throw e.setUnfinishedMessage(this);
      } catch (java.io.IOException e) {
        throw new com.google.protobuf.InvalidProtocolBufferException(
            e.getMessage()).setUnfinishedMessage(this);
      } finally {
        this.unknownFields = unknownFields.build();
        makeExtensionsImmutable();
      }
    }
    public static final com.google.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_DiscardSegmentsResponseProto_descriptor;
    }

    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_DiscardSegmentsResponseProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsResponseProto.Builder.class);
    }

    public static com.google.protobuf.Parser<DiscardSegmentsResponseProto> PARSER =
        new com.google.protobuf.AbstractParser<DiscardSegmentsResponseProto>() {
      public DiscardSegmentsResponseProto parsePartialFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws com.google.protobuf.InvalidProtocolBufferException {
        return new DiscardSegmentsResponseProto(input, extensionRegistry);
      }
    };
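    // All of the static parseFrom/parseDelimitedFrom overloads below funnel
    // through this PARSER. parsePartialFrom defers required-field checks;
    // the public parseFrom entry points validate the parsed message, so a
    // missing required field surfaces as InvalidProtocolBufferException.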

    @java.lang.Override
    public com.google.protobuf.Parser<DiscardSegmentsResponseProto> getParserForType() {
      return PARSER;
    }

    private void initFields() {
    }
    private byte memoizedIsInitialized = -1;
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized != -1) return isInitialized == 1;

      memoizedIsInitialized = 1;
      return true;
    }

    public void writeTo(com.google.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      getSerializedSize();
      getUnknownFields().writeTo(output);
    }

    private int memoizedSerializedSize = -1;
    public int getSerializedSize() {
      int size = memoizedSerializedSize;
      if (size != -1) return size;

      size = 0;
      size += getUnknownFields().getSerializedSize();
      memoizedSerializedSize = size;
      return size;
    }

    private static final long serialVersionUID = 0L;
    @java.lang.Override
    protected java.lang.Object writeReplace()
        throws java.io.ObjectStreamException {
      return super.writeReplace();
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsResponseProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsResponseProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsResponseProto) obj;

      boolean result = true;
      result = result &&
          getUnknownFields().equals(other.getUnknownFields());
      return result;
    }

    private int memoizedHashCode = 0;
    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptorForType().hashCode();
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsResponseProto parseFrom(
        com.google.protobuf.ByteString data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsResponseProto parseFrom(
        com.google.protobuf.ByteString data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsResponseProto parseFrom(byte[] data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsResponseProto parseFrom(
        byte[] data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsResponseProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsResponseProto parseFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsResponseProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsResponseProto parseDelimitedFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsResponseProto parseFrom(
        com.google.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsResponseProto parseFrom(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }

    public static Builder newBuilder() { return Builder.create(); }
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsResponseProto prototype) {
      return newBuilder().mergeFrom(prototype);
    }
    public Builder toBuilder() { return newBuilder(this); }

    @java.lang.Override
    protected Builder newBuilderForType(
        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * Protobuf type {@code hadoop.hdfs.qjournal.DiscardSegmentsResponseProto}
     */
    public static final class Builder extends
        com.google.protobuf.GeneratedMessage.Builder<Builder>
       implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsResponseProtoOrBuilder {
      public static final com.google.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_DiscardSegmentsResponseProto_descriptor;
      }

      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_DiscardSegmentsResponseProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsResponseProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsResponseProto.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }

      private Builder(
          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      private void maybeForceBuilderInitialization() {
        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
        }
      }
      private static Builder create() {
        return new Builder();
      }

      public Builder clear() {
        super.clear();
        return this;
      }

      public Builder clone() {
        return create().mergeFrom(buildPartial());
      }

      public com.google.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_DiscardSegmentsResponseProto_descriptor;
      }

      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsResponseProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsResponseProto.getDefaultInstance();
      }

      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsResponseProto build() {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsResponseProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsResponseProto buildPartial() {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsResponseProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsResponseProto(this);
        onBuilt();
        return result;
      }

      public Builder mergeFrom(com.google.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsResponseProto) {
          return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsResponseProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsResponseProto other) {
        if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsResponseProto.getDefaultInstance()) return this;
        this.mergeUnknownFields(other.getUnknownFields());
        return this;
      }

      public final boolean isInitialized() {
        return true;
      }

      public Builder mergeFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsResponseProto parsedMessage = null;
        try {
          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
          parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsResponseProto) e.getUnfinishedMessage();
          throw e;
        } finally {
          if (parsedMessage != null) {
            mergeFrom(parsedMessage);
          }
        }
        return this;
      }

      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.qjournal.DiscardSegmentsResponseProto)
    }

    static {
      defaultInstance = new DiscardSegmentsResponseProto(true);
      defaultInstance.initFields();
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.qjournal.DiscardSegmentsResponseProto)
  }
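  // Illustrative sketch (not part of the generated file): the response
  // message carries no fields, so callers normally use the shared default
  // instance. The delimited variants frame each message with a varint length
  // prefix, pairing writeDelimitedTo with parseDelimitedFrom:
  //
  //   DiscardSegmentsResponseProto resp =
  //       DiscardSegmentsResponseProto.getDefaultInstance();
  //   java.io.ByteArrayOutputStream out = new java.io.ByteArrayOutputStream();
  //   resp.writeDelimitedTo(out);  // varint length prefix + message bytes
  //   DiscardSegmentsResponseProto back = DiscardSegmentsResponseProto
  //       .parseDelimitedFrom(new java.io.ByteArrayInputStream(out.toByteArray()));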

  public interface GetJournalCTimeRequestProtoOrBuilder
      extends com.google.protobuf.MessageOrBuilder {

    // required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;
    /**
     * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
     */
    boolean hasJid();
    /**
     * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
     */
    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid();
    /**
     * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
     */
    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder();
  }
  /**
   * Protobuf type {@code hadoop.hdfs.qjournal.GetJournalCTimeRequestProto}
   *
   * <pre>
   **
   * getJournalCTime()
   * </pre>
   */
  public static final class GetJournalCTimeRequestProto extends
      com.google.protobuf.GeneratedMessage
      implements GetJournalCTimeRequestProtoOrBuilder {
    // Use GetJournalCTimeRequestProto.newBuilder() to construct.
    private GetJournalCTimeRequestProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
      super(builder);
      this.unknownFields = builder.getUnknownFields();
    }
    private GetJournalCTimeRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }

    private static final GetJournalCTimeRequestProto defaultInstance;
    public static GetJournalCTimeRequestProto getDefaultInstance() {
      return defaultInstance;
    }

    public GetJournalCTimeRequestProto getDefaultInstanceForType() {
      return defaultInstance;
    }

    private final com.google.protobuf.UnknownFieldSet unknownFields;
    @java.lang.Override
    public final com.google.protobuf.UnknownFieldSet
        getUnknownFields() {
      return this.unknownFields;
    }
    private GetJournalCTimeRequestProto(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      initFields();
      int mutable_bitField0_ = 0;
      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
          com.google.protobuf.UnknownFieldSet.newBuilder();
      try {
        boolean done = false;
        while (!done) {
          int tag = input.readTag();
          switch (tag) {
            case 0:
              done = true;
              break;
            default: {
              if (!parseUnknownField(input, unknownFields,
                                     extensionRegistry, tag)) {
                done = true;
              }
              break;
            }
            case 10: {
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder subBuilder = null;
              if (((bitField0_ & 0x00000001) == 0x00000001)) {
                subBuilder = jid_.toBuilder();
              }
              jid_ = input.readMessage(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.PARSER, extensionRegistry);
              if (subBuilder != null) {
                subBuilder.mergeFrom(jid_);
                jid_ = subBuilder.buildPartial();
              }
              bitField0_ |= 0x00000001;
              break;
            }
          }
        }
      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
        throw e.setUnfinishedMessage(this);
      } catch (java.io.IOException e) {
        throw new com.google.protobuf.InvalidProtocolBufferException(
            e.getMessage()).setUnfinishedMessage(this);
      } finally {
        this.unknownFields = unknownFields.build();
        makeExtensionsImmutable();
      }
    }
    public static final com.google.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_GetJournalCTimeRequestProto_descriptor;
    }

    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_GetJournalCTimeRequestProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeRequestProto.Builder.class);
    }

    public static com.google.protobuf.Parser<GetJournalCTimeRequestProto> PARSER =
        new com.google.protobuf.AbstractParser<GetJournalCTimeRequestProto>() {
      public GetJournalCTimeRequestProto parsePartialFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws com.google.protobuf.InvalidProtocolBufferException {
        return new GetJournalCTimeRequestProto(input, extensionRegistry);
      }
    };

    @java.lang.Override
    public com.google.protobuf.Parser<GetJournalCTimeRequestProto> getParserForType() {
      return PARSER;
    }

    private int bitField0_;
    // required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;
    public static final int JID_FIELD_NUMBER = 1;
    private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto jid_;
    /**
     * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
     */
    public boolean hasJid() {
      return ((bitField0_ & 0x00000001) == 0x00000001);
    }
    /**
     * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
     */
    public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid() {
      return jid_;
    }
    /**
     * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
     */
    public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder() {
      return jid_;
    }

    private void initFields() {
      jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
    }
    private byte memoizedIsInitialized = -1;
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized != -1) return isInitialized == 1;

      if (!hasJid()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!getJid().isInitialized()) {
        memoizedIsInitialized = 0;
        return false;
      }
      memoizedIsInitialized = 1;
      return true;
    }

    public void writeTo(com.google.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      getSerializedSize();
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        output.writeMessage(1, jid_);
      }
      getUnknownFields().writeTo(output);
    }

    private int memoizedSerializedSize = -1;
    public int getSerializedSize() {
      int size = memoizedSerializedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        size += com.google.protobuf.CodedOutputStream
          .computeMessageSize(1, jid_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSerializedSize = size;
      return size;
    }
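    // getSerializedSize() memoizes its result, and writeTo() invokes it
    // first so nested message sizes are already cached when serialization
    // begins.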

    private static final long serialVersionUID = 0L;
    @java.lang.Override
    protected java.lang.Object writeReplace()
        throws java.io.ObjectStreamException {
      return super.writeReplace();
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeRequestProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeRequestProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeRequestProto) obj;

      boolean result = true;
      result = result && (hasJid() == other.hasJid());
      if (hasJid()) {
        result = result && getJid()
            .equals(other.getJid());
      }
      result = result &&
          getUnknownFields().equals(other.getUnknownFields());
      return result;
    }

    private int memoizedHashCode = 0;
    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptorForType().hashCode();
      if (hasJid()) {
        hash = (37 * hash) + JID_FIELD_NUMBER;
        hash = (53 * hash) + getJid().hashCode();
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeRequestProto parseFrom(
        com.google.protobuf.ByteString data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeRequestProto parseFrom(
        com.google.protobuf.ByteString data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeRequestProto parseFrom(byte[] data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeRequestProto parseFrom(
        byte[] data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeRequestProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeRequestProto parseFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeRequestProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeRequestProto parseDelimitedFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeRequestProto parseFrom(
        com.google.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeRequestProto parseFrom(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }

    public static Builder newBuilder() { return Builder.create(); }
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeRequestProto prototype) {
      return newBuilder().mergeFrom(prototype);
    }
    public Builder toBuilder() { return newBuilder(this); }

    @java.lang.Override
    protected Builder newBuilderForType(
        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * Protobuf type {@code hadoop.hdfs.qjournal.GetJournalCTimeRequestProto}
     *
     * <pre>
     **
     * getJournalCTime()
     * </pre>
     */
    public static final class Builder extends
        com.google.protobuf.GeneratedMessage.Builder<Builder>
       implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeRequestProtoOrBuilder {
      public static final com.google.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_GetJournalCTimeRequestProto_descriptor;
      }

      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_GetJournalCTimeRequestProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeRequestProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeRequestProto.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }

      private Builder(
          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      private void maybeForceBuilderInitialization() {
        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
          getJidFieldBuilder();
        }
      }
      private static Builder create() {
        return new Builder();
      }

      public Builder clear() {
        super.clear();
        if (jidBuilder_ == null) {
          jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
        } else {
          jidBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000001);
        return this;
      }

      public Builder clone() {
        return create().mergeFrom(buildPartial());
      }

      public com.google.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_GetJournalCTimeRequestProto_descriptor;
      }

      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeRequestProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeRequestProto.getDefaultInstance();
      }

      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeRequestProto build() {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeRequestProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeRequestProto buildPartial() {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeRequestProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeRequestProto(this);
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
          to_bitField0_ |= 0x00000001;
        }
        if (jidBuilder_ == null) {
          result.jid_ = jid_;
        } else {
          result.jid_ = jidBuilder_.build();
        }
        result.bitField0_ = to_bitField0_;
        onBuilt();
        return result;
      }

      public Builder mergeFrom(com.google.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeRequestProto) {
          return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeRequestProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeRequestProto other) {
        if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeRequestProto.getDefaultInstance()) return this;
        if (other.hasJid()) {
          mergeJid(other.getJid());
        }
        this.mergeUnknownFields(other.getUnknownFields());
        return this;
      }

      public final boolean isInitialized() {
        // The required jid must be set and itself fully initialized.
        if (!hasJid()) {
          return false;
        }
        if (!getJid().isInitialized()) {
          return false;
        }
        return true;
      }
10665    
10666          public Builder mergeFrom(
10667              com.google.protobuf.CodedInputStream input,
10668              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
10669              throws java.io.IOException {
10670            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeRequestProto parsedMessage = null;
10671            try {
10672              parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
10673            } catch (com.google.protobuf.InvalidProtocolBufferException e) {
10674              parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeRequestProto) e.getUnfinishedMessage();
10675              throw e;
10676            } finally {
10677              if (parsedMessage != null) {
10678                mergeFrom(parsedMessage);
10679              }
10680            }
10681            return this;
10682          }
10683          private int bitField0_;
10684    
10685          // required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;
10686          private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
10687          private com.google.protobuf.SingleFieldBuilder<
10688              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder> jidBuilder_;
10689          /**
10690           * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
10691           */
10692          public boolean hasJid() {
10693            return ((bitField0_ & 0x00000001) == 0x00000001);
10694          }
10695          /**
10696           * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
10697           */
10698          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid() {
10699            if (jidBuilder_ == null) {
10700              return jid_;
10701            } else {
10702              return jidBuilder_.getMessage();
10703            }
10704          }
10705          /**
10706           * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
10707           */
10708          public Builder setJid(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto value) {
10709            if (jidBuilder_ == null) {
10710              if (value == null) {
10711                throw new NullPointerException();
10712              }
10713              jid_ = value;
10714              onChanged();
10715            } else {
10716              jidBuilder_.setMessage(value);
10717            }
10718            bitField0_ |= 0x00000001;
10719            return this;
10720          }
10721          /**
10722           * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
10723           */
10724          public Builder setJid(
10725              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder builderForValue) {
10726            if (jidBuilder_ == null) {
10727              jid_ = builderForValue.build();
10728              onChanged();
10729            } else {
10730              jidBuilder_.setMessage(builderForValue.build());
10731            }
10732            bitField0_ |= 0x00000001;
10733            return this;
10734          }
10735          /**
10736           * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
10737           */
10738          public Builder mergeJid(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto value) {
10739            if (jidBuilder_ == null) {
10740              if (((bitField0_ & 0x00000001) == 0x00000001) &&
10741                  jid_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance()) {
10742                jid_ =
10743                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.newBuilder(jid_).mergeFrom(value).buildPartial();
10744              } else {
10745                jid_ = value;
10746              }
10747              onChanged();
10748            } else {
10749              jidBuilder_.mergeFrom(value);
10750            }
10751            bitField0_ |= 0x00000001;
10752            return this;
10753          }
10754          /**
10755           * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
10756           */
10757          public Builder clearJid() {
10758            if (jidBuilder_ == null) {
10759              jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
10760              onChanged();
10761            } else {
10762              jidBuilder_.clear();
10763            }
10764            bitField0_ = (bitField0_ & ~0x00000001);
10765            return this;
10766          }
10767          /**
10768           * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
10769           */
10770          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder getJidBuilder() {
10771            bitField0_ |= 0x00000001;
10772            onChanged();
10773            return getJidFieldBuilder().getBuilder();
10774          }
10775          /**
10776           * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
10777           */
10778          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder() {
10779            if (jidBuilder_ != null) {
10780              return jidBuilder_.getMessageOrBuilder();
10781            } else {
10782              return jid_;
10783            }
10784          }
10785          /**
10786           * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
10787           */
10788          private com.google.protobuf.SingleFieldBuilder<
10789              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder> 
10790              getJidFieldBuilder() {
10791            if (jidBuilder_ == null) {
10792              jidBuilder_ = new com.google.protobuf.SingleFieldBuilder<
10793                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder>(
10794                      jid_,
10795                      getParentForChildren(),
10796                      isClean());
10797              jid_ = null;
10798            }
10799            return jidBuilder_;
10800          }
10801    
10802          // @@protoc_insertion_point(builder_scope:hadoop.hdfs.qjournal.GetJournalCTimeRequestProto)
10803        }
10804    
10805        static {
10806          defaultInstance = new GetJournalCTimeRequestProto(true);
10807          defaultInstance.initFields();
10808        }
10809    
10810        // @@protoc_insertion_point(class_scope:hadoop.hdfs.qjournal.GetJournalCTimeRequestProto)
10811      }
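
  // Illustrative usage (a minimal sketch, not part of the generated code):
  // building a GetJournalCTimeRequestProto around its required JournalIdProto
  // and round-tripping it through the wire format. The journal name below is
  // hypothetical.
  //
  //   QJournalProtocolProtos.JournalIdProto jid =
  //       QJournalProtocolProtos.JournalIdProto.newBuilder()
  //           .setIdentifier("myjournal")   // hypothetical journal id
  //           .build();
  //   QJournalProtocolProtos.GetJournalCTimeRequestProto req =
  //       QJournalProtocolProtos.GetJournalCTimeRequestProto.newBuilder()
  //           .setJid(jid)
  //           .build();                     // build() validates required jid
  //   byte[] bytes = req.toByteArray();
  //   QJournalProtocolProtos.GetJournalCTimeRequestProto parsed =
  //       QJournalProtocolProtos.GetJournalCTimeRequestProto.parseFrom(bytes);
  //   assert parsed.getJid().getIdentifier().equals("myjournal");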
10812    
10813      public interface GetJournalCTimeResponseProtoOrBuilder
10814          extends com.google.protobuf.MessageOrBuilder {
10815    
10816        // required int64 resultCTime = 1;
10817        /**
10818         * <code>required int64 resultCTime = 1;</code>
10819         */
10820        boolean hasResultCTime();
10821        /**
10822         * <code>required int64 resultCTime = 1;</code>
10823         */
10824        long getResultCTime();
10825      }
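
  // The *OrBuilder interface exposes only the read accessors shared by the
  // immutable GetJournalCTimeResponseProto and its Builder, letting callers
  // accept either form without forcing an intermediate build().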
10826      /**
10827       * Protobuf type {@code hadoop.hdfs.qjournal.GetJournalCTimeResponseProto}
10828       */
10829      public static final class GetJournalCTimeResponseProto extends
10830          com.google.protobuf.GeneratedMessage
10831          implements GetJournalCTimeResponseProtoOrBuilder {
10832        // Use GetJournalCTimeResponseProto.newBuilder() to construct.
10833        private GetJournalCTimeResponseProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
10834          super(builder);
10835          this.unknownFields = builder.getUnknownFields();
10836        }
10837        private GetJournalCTimeResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
10838    
10839        private static final GetJournalCTimeResponseProto defaultInstance;
10840        public static GetJournalCTimeResponseProto getDefaultInstance() {
10841          return defaultInstance;
10842        }
10843    
10844        public GetJournalCTimeResponseProto getDefaultInstanceForType() {
10845          return defaultInstance;
10846        }
10847    
10848        private final com.google.protobuf.UnknownFieldSet unknownFields;
10849        @java.lang.Override
10850        public final com.google.protobuf.UnknownFieldSet
10851            getUnknownFields() {
10852          return this.unknownFields;
10853        }
10854        private GetJournalCTimeResponseProto(
10855            com.google.protobuf.CodedInputStream input,
10856            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
10857            throws com.google.protobuf.InvalidProtocolBufferException {
10858          initFields();
10859          int mutable_bitField0_ = 0;
10860          com.google.protobuf.UnknownFieldSet.Builder unknownFields =
10861              com.google.protobuf.UnknownFieldSet.newBuilder();
10862          try {
10863            boolean done = false;
10864            while (!done) {
10865              int tag = input.readTag();
10866              switch (tag) {
10867                case 0:
10868                  done = true;
10869                  break;
10870                default: {
10871                  if (!parseUnknownField(input, unknownFields,
10872                                         extensionRegistry, tag)) {
10873                    done = true;
10874                  }
10875                  break;
10876                }
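            // Tag 8 = (field number 1 << 3) | wire type 0 (varint):
            // the required resultCTime field.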
10877                case 8: {
10878                  bitField0_ |= 0x00000001;
10879                  resultCTime_ = input.readInt64();
10880                  break;
10881                }
10882              }
10883            }
10884          } catch (com.google.protobuf.InvalidProtocolBufferException e) {
10885            throw e.setUnfinishedMessage(this);
10886          } catch (java.io.IOException e) {
10887            throw new com.google.protobuf.InvalidProtocolBufferException(
10888                e.getMessage()).setUnfinishedMessage(this);
10889          } finally {
10890            this.unknownFields = unknownFields.build();
10891            makeExtensionsImmutable();
10892          }
10893        }
10894        public static final com.google.protobuf.Descriptors.Descriptor
10895            getDescriptor() {
10896          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_GetJournalCTimeResponseProto_descriptor;
10897        }
10898    
10899        protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
10900            internalGetFieldAccessorTable() {
10901          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_GetJournalCTimeResponseProto_fieldAccessorTable
10902              .ensureFieldAccessorsInitialized(
10903                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeResponseProto.Builder.class);
10904        }
10905    
10906        public static com.google.protobuf.Parser<GetJournalCTimeResponseProto> PARSER =
10907            new com.google.protobuf.AbstractParser<GetJournalCTimeResponseProto>() {
10908          public GetJournalCTimeResponseProto parsePartialFrom(
10909              com.google.protobuf.CodedInputStream input,
10910              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
10911              throws com.google.protobuf.InvalidProtocolBufferException {
10912            return new GetJournalCTimeResponseProto(input, extensionRegistry);
10913          }
10914        };
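    // PARSER delegates to the private parsing constructor above, which
    // consumes the stream tag-by-tag and preserves any unknown fields.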
10915    
10916        @java.lang.Override
10917        public com.google.protobuf.Parser<GetJournalCTimeResponseProto> getParserForType() {
10918          return PARSER;
10919        }
10920    
10921        private int bitField0_;
10922        // required int64 resultCTime = 1;
10923        public static final int RESULTCTIME_FIELD_NUMBER = 1;
10924        private long resultCTime_;
10925        /**
10926         * <code>required int64 resultCTime = 1;</code>
10927         */
10928        public boolean hasResultCTime() {
10929          return ((bitField0_ & 0x00000001) == 0x00000001);
10930        }
10931        /**
10932         * <code>required int64 resultCTime = 1;</code>
10933         */
10934        public long getResultCTime() {
10935          return resultCTime_;
10936        }
10937    
10938        private void initFields() {
10939          resultCTime_ = 0L;
10940        }
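    // The isInitialized() result is memoized: -1 = not yet computed,
    // 0 = missing required fields, 1 = fully initialized.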
10941        private byte memoizedIsInitialized = -1;
10942        public final boolean isInitialized() {
10943          byte isInitialized = memoizedIsInitialized;
10944          if (isInitialized != -1) return isInitialized == 1;
10945    
10946          if (!hasResultCTime()) {
10947            memoizedIsInitialized = 0;
10948            return false;
10949          }
10950          memoizedIsInitialized = 1;
10951          return true;
10952        }
10953    
10954        public void writeTo(com.google.protobuf.CodedOutputStream output)
10955                            throws java.io.IOException {
10956          getSerializedSize();
10957          if (((bitField0_ & 0x00000001) == 0x00000001)) {
10958            output.writeInt64(1, resultCTime_);
10959          }
10960          getUnknownFields().writeTo(output);
10961        }
10962    
10963        private int memoizedSerializedSize = -1;
10964        public int getSerializedSize() {
10965          int size = memoizedSerializedSize;
10966          if (size != -1) return size;
10967    
10968          size = 0;
10969          if (((bitField0_ & 0x00000001) == 0x00000001)) {
10970            size += com.google.protobuf.CodedOutputStream
10971              .computeInt64Size(1, resultCTime_);
10972          }
10973          size += getUnknownFields().getSerializedSize();
10974          memoizedSerializedSize = size;
10975          return size;
10976        }
10977    
10978        private static final long serialVersionUID = 0L;
10979        @java.lang.Override
10980        protected java.lang.Object writeReplace()
10981            throws java.io.ObjectStreamException {
10982          return super.writeReplace();
10983        }
10984    
10985        @java.lang.Override
10986        public boolean equals(final java.lang.Object obj) {
10987          if (obj == this) {
        return true;
10989          }
10990          if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeResponseProto)) {
10991            return super.equals(obj);
10992          }
10993          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeResponseProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeResponseProto) obj;
10994    
10995          boolean result = true;
10996          result = result && (hasResultCTime() == other.hasResultCTime());
10997          if (hasResultCTime()) {
10998            result = result && (getResultCTime()
10999                == other.getResultCTime());
11000          }
11001          result = result &&
11002              getUnknownFields().equals(other.getUnknownFields());
11003          return result;
11004        }
11005    
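    // The hash code is computed once and cached; 0 doubles as the
    // "not yet computed" sentinel, mirroring memoizedSerializedSize above.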
11006        private int memoizedHashCode = 0;
11007        @java.lang.Override
11008        public int hashCode() {
11009          if (memoizedHashCode != 0) {
11010            return memoizedHashCode;
11011          }
11012          int hash = 41;
11013          hash = (19 * hash) + getDescriptorForType().hashCode();
11014          if (hasResultCTime()) {
11015            hash = (37 * hash) + RESULTCTIME_FIELD_NUMBER;
11016            hash = (53 * hash) + hashLong(getResultCTime());
11017          }
11018          hash = (29 * hash) + getUnknownFields().hashCode();
11019          memoizedHashCode = hash;
11020          return hash;
11021        }
11022    
11023        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeResponseProto parseFrom(
11024            com.google.protobuf.ByteString data)
11025            throws com.google.protobuf.InvalidProtocolBufferException {
11026          return PARSER.parseFrom(data);
11027        }
11028        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeResponseProto parseFrom(
11029            com.google.protobuf.ByteString data,
11030            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
11031            throws com.google.protobuf.InvalidProtocolBufferException {
11032          return PARSER.parseFrom(data, extensionRegistry);
11033        }
11034        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeResponseProto parseFrom(byte[] data)
11035            throws com.google.protobuf.InvalidProtocolBufferException {
11036          return PARSER.parseFrom(data);
11037        }
11038        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeResponseProto parseFrom(
11039            byte[] data,
11040            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
11041            throws com.google.protobuf.InvalidProtocolBufferException {
11042          return PARSER.parseFrom(data, extensionRegistry);
11043        }
11044        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeResponseProto parseFrom(java.io.InputStream input)
11045            throws java.io.IOException {
11046          return PARSER.parseFrom(input);
11047        }
11048        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeResponseProto parseFrom(
11049            java.io.InputStream input,
11050            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
11051            throws java.io.IOException {
11052          return PARSER.parseFrom(input, extensionRegistry);
11053        }
11054        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeResponseProto parseDelimitedFrom(java.io.InputStream input)
11055            throws java.io.IOException {
11056          return PARSER.parseDelimitedFrom(input);
11057        }
11058        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeResponseProto parseDelimitedFrom(
11059            java.io.InputStream input,
11060            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
11061            throws java.io.IOException {
11062          return PARSER.parseDelimitedFrom(input, extensionRegistry);
11063        }
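    // The delimited variants above expect a varint length prefix before the
    // message bytes (the framing written by writeDelimitedTo), which allows
    // several messages to be streamed over a single InputStream.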
11064        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeResponseProto parseFrom(
11065            com.google.protobuf.CodedInputStream input)
11066            throws java.io.IOException {
11067          return PARSER.parseFrom(input);
11068        }
11069        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeResponseProto parseFrom(
11070            com.google.protobuf.CodedInputStream input,
11071            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
11072            throws java.io.IOException {
11073          return PARSER.parseFrom(input, extensionRegistry);
11074        }
11075    
11076        public static Builder newBuilder() { return Builder.create(); }
11077        public Builder newBuilderForType() { return newBuilder(); }
11078        public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeResponseProto prototype) {
11079          return newBuilder().mergeFrom(prototype);
11080        }
11081        public Builder toBuilder() { return newBuilder(this); }
11082    
11083        @java.lang.Override
11084        protected Builder newBuilderForType(
11085            com.google.protobuf.GeneratedMessage.BuilderParent parent) {
11086          Builder builder = new Builder(parent);
11087          return builder;
11088        }
11089        /**
11090         * Protobuf type {@code hadoop.hdfs.qjournal.GetJournalCTimeResponseProto}
11091         */
11092        public static final class Builder extends
11093            com.google.protobuf.GeneratedMessage.Builder<Builder>
11094           implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeResponseProtoOrBuilder {
11095          public static final com.google.protobuf.Descriptors.Descriptor
11096              getDescriptor() {
11097            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_GetJournalCTimeResponseProto_descriptor;
11098          }
11099    
11100          protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
11101              internalGetFieldAccessorTable() {
11102            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_GetJournalCTimeResponseProto_fieldAccessorTable
11103                .ensureFieldAccessorsInitialized(
11104                    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeResponseProto.Builder.class);
11105          }
11106    
11107          // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeResponseProto.newBuilder()
11108          private Builder() {
11109            maybeForceBuilderInitialization();
11110          }
11111    
11112          private Builder(
11113              com.google.protobuf.GeneratedMessage.BuilderParent parent) {
11114            super(parent);
11115            maybeForceBuilderInitialization();
11116          }
11117          private void maybeForceBuilderInitialization() {
11118            if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
11119            }
11120          }
11121          private static Builder create() {
11122            return new Builder();
11123          }
11124    
11125          public Builder clear() {
11126            super.clear();
11127            resultCTime_ = 0L;
11128            bitField0_ = (bitField0_ & ~0x00000001);
11129            return this;
11130          }
11131    
11132          public Builder clone() {
11133            return create().mergeFrom(buildPartial());
11134          }
11135    
11136          public com.google.protobuf.Descriptors.Descriptor
11137              getDescriptorForType() {
11138            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_GetJournalCTimeResponseProto_descriptor;
11139          }
11140    
11141          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeResponseProto getDefaultInstanceForType() {
11142            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeResponseProto.getDefaultInstance();
11143          }
11144    
11145          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeResponseProto build() {
11146            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeResponseProto result = buildPartial();
11147            if (!result.isInitialized()) {
11148              throw newUninitializedMessageException(result);
11149            }
11150            return result;
11151          }
11152    
11153          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeResponseProto buildPartial() {
11154            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeResponseProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeResponseProto(this);
11155            int from_bitField0_ = bitField0_;
11156            int to_bitField0_ = 0;
11157            if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
11158              to_bitField0_ |= 0x00000001;
11159            }
11160            result.resultCTime_ = resultCTime_;
11161            result.bitField0_ = to_bitField0_;
11162            onBuilt();
11163            return result;
11164          }
11165    
11166          public Builder mergeFrom(com.google.protobuf.Message other) {
11167            if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeResponseProto) {
11168              return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeResponseProto)other);
11169            } else {
11170              super.mergeFrom(other);
11171              return this;
11172            }
11173          }
11174    
11175          public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeResponseProto other) {
11176            if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeResponseProto.getDefaultInstance()) return this;
11177            if (other.hasResultCTime()) {
11178              setResultCTime(other.getResultCTime());
11179            }
11180            this.mergeUnknownFields(other.getUnknownFields());
11181            return this;
11182          }
11183    
11184          public final boolean isInitialized() {
11185            if (!hasResultCTime()) {
11187              return false;
11188            }
11189            return true;
11190          }
11191    
11192          public Builder mergeFrom(
11193              com.google.protobuf.CodedInputStream input,
11194              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
11195              throws java.io.IOException {
11196            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeResponseProto parsedMessage = null;
11197            try {
11198              parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
11199            } catch (com.google.protobuf.InvalidProtocolBufferException e) {
11200              parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeResponseProto) e.getUnfinishedMessage();
11201              throw e;
11202            } finally {
11203              if (parsedMessage != null) {
11204                mergeFrom(parsedMessage);
11205              }
11206            }
11207            return this;
11208          }
11209          private int bitField0_;
11210    
11211          // required int64 resultCTime = 1;
11212          private long resultCTime_ ;
11213          /**
11214           * <code>required int64 resultCTime = 1;</code>
11215           */
11216          public boolean hasResultCTime() {
11217            return ((bitField0_ & 0x00000001) == 0x00000001);
11218          }
11219          /**
11220           * <code>required int64 resultCTime = 1;</code>
11221           */
11222          public long getResultCTime() {
11223            return resultCTime_;
11224          }
11225          /**
11226           * <code>required int64 resultCTime = 1;</code>
11227           */
11228          public Builder setResultCTime(long value) {
11229            bitField0_ |= 0x00000001;
11230            resultCTime_ = value;
11231            onChanged();
11232            return this;
11233          }
11234          /**
11235           * <code>required int64 resultCTime = 1;</code>
11236           */
11237          public Builder clearResultCTime() {
11238            bitField0_ = (bitField0_ & ~0x00000001);
11239            resultCTime_ = 0L;
11240            onChanged();
11241            return this;
11242          }
11243    
11244          // @@protoc_insertion_point(builder_scope:hadoop.hdfs.qjournal.GetJournalCTimeResponseProto)
11245        }
11246    
11247        static {
11248          defaultInstance = new GetJournalCTimeResponseProto(true);
11249          defaultInstance.initFields();
11250        }
11251    
11252        // @@protoc_insertion_point(class_scope:hadoop.hdfs.qjournal.GetJournalCTimeResponseProto)
11253      }
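
  // Illustrative usage (a minimal sketch, not part of the generated code):
  // resultCTime is a required field, so build() enforces presence while
  // buildPartial() does not. The ctime value below is hypothetical.
  //
  //   QJournalProtocolProtos.GetJournalCTimeResponseProto resp =
  //       QJournalProtocolProtos.GetJournalCTimeResponseProto.newBuilder()
  //           .setResultCTime(1700000000000L)  // hypothetical ctime, in ms
  //           .build();
  //   long ctime = resp.hasResultCTime() ? resp.getResultCTime() : 0L;
  //
  //   // Calling build() without setResultCTime(...) would throw an
  //   // UninitializedMessageException naming the missing field.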
11254    
11255      public interface DoPreUpgradeRequestProtoOrBuilder
11256          extends com.google.protobuf.MessageOrBuilder {
11257    
11258        // required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;
11259        /**
11260         * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
11261         */
11262        boolean hasJid();
11263        /**
11264         * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
11265         */
11266        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid();
11267        /**
11268         * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
11269         */
11270        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder();
11271      }
11272      /**
11273       * Protobuf type {@code hadoop.hdfs.qjournal.DoPreUpgradeRequestProto}
11274       *
11275       * <pre>
11277       * doPreUpgrade()
11278       * </pre>
11279       */
11280      public static final class DoPreUpgradeRequestProto extends
11281          com.google.protobuf.GeneratedMessage
11282          implements DoPreUpgradeRequestProtoOrBuilder {
11283        // Use DoPreUpgradeRequestProto.newBuilder() to construct.
11284        private DoPreUpgradeRequestProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
11285          super(builder);
11286          this.unknownFields = builder.getUnknownFields();
11287        }
11288        private DoPreUpgradeRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
11289    
11290        private static final DoPreUpgradeRequestProto defaultInstance;
11291        public static DoPreUpgradeRequestProto getDefaultInstance() {
11292          return defaultInstance;
11293        }
11294    
11295        public DoPreUpgradeRequestProto getDefaultInstanceForType() {
11296          return defaultInstance;
11297        }
11298    
11299        private final com.google.protobuf.UnknownFieldSet unknownFields;
11300        @java.lang.Override
11301        public final com.google.protobuf.UnknownFieldSet
11302            getUnknownFields() {
11303          return this.unknownFields;
11304        }
11305        private DoPreUpgradeRequestProto(
11306            com.google.protobuf.CodedInputStream input,
11307            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
11308            throws com.google.protobuf.InvalidProtocolBufferException {
11309          initFields();
11310          int mutable_bitField0_ = 0;
11311          com.google.protobuf.UnknownFieldSet.Builder unknownFields =
11312              com.google.protobuf.UnknownFieldSet.newBuilder();
11313          try {
11314            boolean done = false;
11315            while (!done) {
11316              int tag = input.readTag();
11317              switch (tag) {
11318                case 0:
11319                  done = true;
11320                  break;
11321                default: {
11322                  if (!parseUnknownField(input, unknownFields,
11323                                         extensionRegistry, tag)) {
11324                    done = true;
11325                  }
11326                  break;
11327                }
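            // Tag 10 = (field number 1 << 3) | wire type 2
            // (length-delimited): an embedded JournalIdProto. If jid was
            // already parsed, the new occurrence is merged into it, per
            // proto2 semantics for singular message fields.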
11328                case 10: {
11329                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder subBuilder = null;
11330                  if (((bitField0_ & 0x00000001) == 0x00000001)) {
11331                    subBuilder = jid_.toBuilder();
11332                  }
11333                  jid_ = input.readMessage(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.PARSER, extensionRegistry);
11334                  if (subBuilder != null) {
11335                    subBuilder.mergeFrom(jid_);
11336                    jid_ = subBuilder.buildPartial();
11337                  }
11338                  bitField0_ |= 0x00000001;
11339                  break;
11340                }
11341              }
11342            }
11343          } catch (com.google.protobuf.InvalidProtocolBufferException e) {
11344            throw e.setUnfinishedMessage(this);
11345          } catch (java.io.IOException e) {
11346            throw new com.google.protobuf.InvalidProtocolBufferException(
11347                e.getMessage()).setUnfinishedMessage(this);
11348          } finally {
11349            this.unknownFields = unknownFields.build();
11350            makeExtensionsImmutable();
11351          }
11352        }
11353        public static final com.google.protobuf.Descriptors.Descriptor
11354            getDescriptor() {
11355          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_DoPreUpgradeRequestProto_descriptor;
11356        }
11357    
11358        protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
11359            internalGetFieldAccessorTable() {
11360          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_DoPreUpgradeRequestProto_fieldAccessorTable
11361              .ensureFieldAccessorsInitialized(
11362                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeRequestProto.Builder.class);
11363        }
11364    
11365        public static com.google.protobuf.Parser<DoPreUpgradeRequestProto> PARSER =
11366            new com.google.protobuf.AbstractParser<DoPreUpgradeRequestProto>() {
11367          public DoPreUpgradeRequestProto parsePartialFrom(
11368              com.google.protobuf.CodedInputStream input,
11369              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
11370              throws com.google.protobuf.InvalidProtocolBufferException {
11371            return new DoPreUpgradeRequestProto(input, extensionRegistry);
11372          }
11373        };
11374    
11375        @java.lang.Override
11376        public com.google.protobuf.Parser<DoPreUpgradeRequestProto> getParserForType() {
11377          return PARSER;
11378        }
11379    
11380        private int bitField0_;
11381        // required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;
11382        public static final int JID_FIELD_NUMBER = 1;
11383        private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto jid_;
11384        /**
11385         * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
11386         */
11387        public boolean hasJid() {
11388          return ((bitField0_ & 0x00000001) == 0x00000001);
11389        }
11390        /**
11391         * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
11392         */
11393        public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid() {
11394          return jid_;
11395        }
11396        /**
11397         * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
11398         */
11399        public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder() {
11400          return jid_;
11401        }
11402    
11403        private void initFields() {
11404          jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
11405        }
11406        private byte memoizedIsInitialized = -1;
11407        public final boolean isInitialized() {
11408          byte isInitialized = memoizedIsInitialized;
11409          if (isInitialized != -1) return isInitialized == 1;
11410    
11411          if (!hasJid()) {
11412            memoizedIsInitialized = 0;
11413            return false;
11414          }
11415          if (!getJid().isInitialized()) {
11416            memoizedIsInitialized = 0;
11417            return false;
11418          }
11419          memoizedIsInitialized = 1;
11420          return true;
11421        }
11422    
11423        public void writeTo(com.google.protobuf.CodedOutputStream output)
11424                            throws java.io.IOException {
11425          getSerializedSize();
11426          if (((bitField0_ & 0x00000001) == 0x00000001)) {
11427            output.writeMessage(1, jid_);
11428          }
11429          getUnknownFields().writeTo(output);
11430        }
11431    
11432        private int memoizedSerializedSize = -1;
11433        public int getSerializedSize() {
11434          int size = memoizedSerializedSize;
11435          if (size != -1) return size;
11436    
11437          size = 0;
11438          if (((bitField0_ & 0x00000001) == 0x00000001)) {
11439            size += com.google.protobuf.CodedOutputStream
11440              .computeMessageSize(1, jid_);
11441          }
11442          size += getUnknownFields().getSerializedSize();
11443          memoizedSerializedSize = size;
11444          return size;
11445        }
11446    
11447        private static final long serialVersionUID = 0L;
11448        @java.lang.Override
11449        protected java.lang.Object writeReplace()
11450            throws java.io.ObjectStreamException {
11451          return super.writeReplace();
11452        }
11453    
11454        @java.lang.Override
11455        public boolean equals(final java.lang.Object obj) {
11456          if (obj == this) {
        return true;
11458          }
11459          if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeRequestProto)) {
11460            return super.equals(obj);
11461          }
11462          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeRequestProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeRequestProto) obj;
11463    
11464          boolean result = true;
11465          result = result && (hasJid() == other.hasJid());
11466          if (hasJid()) {
11467            result = result && getJid()
11468                .equals(other.getJid());
11469          }
11470          result = result &&
11471              getUnknownFields().equals(other.getUnknownFields());
11472          return result;
11473        }
11474    
11475        private int memoizedHashCode = 0;
11476        @java.lang.Override
11477        public int hashCode() {
11478          if (memoizedHashCode != 0) {
11479            return memoizedHashCode;
11480          }
11481          int hash = 41;
11482          hash = (19 * hash) + getDescriptorForType().hashCode();
11483          if (hasJid()) {
11484            hash = (37 * hash) + JID_FIELD_NUMBER;
11485            hash = (53 * hash) + getJid().hashCode();
11486          }
11487          hash = (29 * hash) + getUnknownFields().hashCode();
11488          memoizedHashCode = hash;
11489          return hash;
11490        }
11491    
11492        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeRequestProto parseFrom(
11493            com.google.protobuf.ByteString data)
11494            throws com.google.protobuf.InvalidProtocolBufferException {
11495          return PARSER.parseFrom(data);
11496        }
11497        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeRequestProto parseFrom(
11498            com.google.protobuf.ByteString data,
11499            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
11500            throws com.google.protobuf.InvalidProtocolBufferException {
11501          return PARSER.parseFrom(data, extensionRegistry);
11502        }
11503        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeRequestProto parseFrom(byte[] data)
11504            throws com.google.protobuf.InvalidProtocolBufferException {
11505          return PARSER.parseFrom(data);
11506        }
11507        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeRequestProto parseFrom(
11508            byte[] data,
11509            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
11510            throws com.google.protobuf.InvalidProtocolBufferException {
11511          return PARSER.parseFrom(data, extensionRegistry);
11512        }
11513        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeRequestProto parseFrom(java.io.InputStream input)
11514            throws java.io.IOException {
11515          return PARSER.parseFrom(input);
11516        }
11517        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeRequestProto parseFrom(
11518            java.io.InputStream input,
11519            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
11520            throws java.io.IOException {
11521          return PARSER.parseFrom(input, extensionRegistry);
11522        }
11523        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeRequestProto parseDelimitedFrom(java.io.InputStream input)
11524            throws java.io.IOException {
11525          return PARSER.parseDelimitedFrom(input);
11526        }
11527        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeRequestProto parseDelimitedFrom(
11528            java.io.InputStream input,
11529            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
11530            throws java.io.IOException {
11531          return PARSER.parseDelimitedFrom(input, extensionRegistry);
11532        }
11533        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeRequestProto parseFrom(
11534            com.google.protobuf.CodedInputStream input)
11535            throws java.io.IOException {
11536          return PARSER.parseFrom(input);
11537        }
11538        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeRequestProto parseFrom(
11539            com.google.protobuf.CodedInputStream input,
11540            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
11541            throws java.io.IOException {
11542          return PARSER.parseFrom(input, extensionRegistry);
11543        }
11544    
11545        public static Builder newBuilder() { return Builder.create(); }
11546        public Builder newBuilderForType() { return newBuilder(); }
11547        public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeRequestProto prototype) {
11548          return newBuilder().mergeFrom(prototype);
11549        }
11550        public Builder toBuilder() { return newBuilder(this); }
11551    
11552        @java.lang.Override
11553        protected Builder newBuilderForType(
11554            com.google.protobuf.GeneratedMessage.BuilderParent parent) {
11555          Builder builder = new Builder(parent);
11556          return builder;
11557        }
11558        /**
11559         * Protobuf type {@code hadoop.hdfs.qjournal.DoPreUpgradeRequestProto}
11560         *
11561         * <pre>
11563         * doPreUpgrade()
11564         * </pre>
11565         */
11566        public static final class Builder extends
11567            com.google.protobuf.GeneratedMessage.Builder<Builder>
11568           implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeRequestProtoOrBuilder {
11569          public static final com.google.protobuf.Descriptors.Descriptor
11570              getDescriptor() {
11571            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_DoPreUpgradeRequestProto_descriptor;
11572          }
11573    
11574          protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
11575              internalGetFieldAccessorTable() {
11576            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_DoPreUpgradeRequestProto_fieldAccessorTable
11577                .ensureFieldAccessorsInitialized(
11578                    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeRequestProto.Builder.class);
11579          }
11580    
11581          // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeRequestProto.newBuilder()
11582          private Builder() {
11583            maybeForceBuilderInitialization();
11584          }
11585    
11586          private Builder(
11587              com.google.protobuf.GeneratedMessage.BuilderParent parent) {
11588            super(parent);
11589            maybeForceBuilderInitialization();
11590          }
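      // Field builders are normally created lazily (see
      // getJidFieldBuilder()); alwaysUseFieldBuilders forces eager creation
      // and is enabled only by protobuf's own tests.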
11591          private void maybeForceBuilderInitialization() {
11592            if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
11593              getJidFieldBuilder();
11594            }
11595          }
11596          private static Builder create() {
11597            return new Builder();
11598          }
11599    
11600          public Builder clear() {
11601            super.clear();
11602            if (jidBuilder_ == null) {
11603              jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
11604            } else {
11605              jidBuilder_.clear();
11606            }
11607            bitField0_ = (bitField0_ & ~0x00000001);
11608            return this;
11609          }
11610    
11611          public Builder clone() {
11612            return create().mergeFrom(buildPartial());
11613          }
11614    
11615          public com.google.protobuf.Descriptors.Descriptor
11616              getDescriptorForType() {
11617            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_DoPreUpgradeRequestProto_descriptor;
11618          }
11619    
11620          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeRequestProto getDefaultInstanceForType() {
11621            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeRequestProto.getDefaultInstance();
11622          }
11623    
11624          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeRequestProto build() {
11625            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeRequestProto result = buildPartial();
11626            if (!result.isInitialized()) {
11627              throw newUninitializedMessageException(result);
11628            }
11629            return result;
11630          }
11631    
11632          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeRequestProto buildPartial() {
11633            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeRequestProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeRequestProto(this);
11634            int from_bitField0_ = bitField0_;
11635            int to_bitField0_ = 0;
11636            if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
11637              to_bitField0_ |= 0x00000001;
11638            }
11639            if (jidBuilder_ == null) {
11640              result.jid_ = jid_;
11641            } else {
11642              result.jid_ = jidBuilder_.build();
11643            }
11644            result.bitField0_ = to_bitField0_;
11645            onBuilt();
11646            return result;
11647          }
11648    
11649          public Builder mergeFrom(com.google.protobuf.Message other) {
11650            if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeRequestProto) {
11651              return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeRequestProto)other);
11652            } else {
11653              super.mergeFrom(other);
11654              return this;
11655            }
11656          }
11657    
11658          public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeRequestProto other) {
11659            if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeRequestProto.getDefaultInstance()) return this;
11660            if (other.hasJid()) {
11661              mergeJid(other.getJid());
11662            }
11663            this.mergeUnknownFields(other.getUnknownFields());
11664            return this;
11665          }
11666    
11667          public final boolean isInitialized() {
11668            if (!hasJid()) {
11670              return false;
11671            }
11672            if (!getJid().isInitialized()) {
11674              return false;
11675            }
11676            return true;
11677          }
11678    
11679          public Builder mergeFrom(
11680              com.google.protobuf.CodedInputStream input,
11681              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
11682              throws java.io.IOException {
11683            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeRequestProto parsedMessage = null;
11684            try {
11685              parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
11686            } catch (com.google.protobuf.InvalidProtocolBufferException e) {
11687              parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeRequestProto) e.getUnfinishedMessage();
11688              throw e;
11689            } finally {
11690              if (parsedMessage != null) {
11691                mergeFrom(parsedMessage);
11692              }
11693            }
11694            return this;
11695          }
11696          private int bitField0_;
11697    
11698          // required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;
11699          private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
11700          private com.google.protobuf.SingleFieldBuilder<
11701              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder> jidBuilder_;
11702          /**
11703           * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
11704           */
11705          public boolean hasJid() {
11706            return ((bitField0_ & 0x00000001) == 0x00000001);
11707          }
11708          /**
11709           * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
11710           */
11711          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid() {
11712            if (jidBuilder_ == null) {
11713              return jid_;
11714            } else {
11715              return jidBuilder_.getMessage();
11716            }
11717          }
11718          /**
11719           * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
11720           */
11721          public Builder setJid(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto value) {
11722            if (jidBuilder_ == null) {
11723              if (value == null) {
11724                throw new NullPointerException();
11725              }
11726              jid_ = value;
11727              onChanged();
11728            } else {
11729              jidBuilder_.setMessage(value);
11730            }
11731            bitField0_ |= 0x00000001;
11732            return this;
11733          }
11734          /**
11735           * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
11736           */
11737          public Builder setJid(
11738              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder builderForValue) {
11739            if (jidBuilder_ == null) {
11740              jid_ = builderForValue.build();
11741              onChanged();
11742            } else {
11743              jidBuilder_.setMessage(builderForValue.build());
11744            }
11745            bitField0_ |= 0x00000001;
11746            return this;
11747          }
11748          /**
11749           * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
11750           */
11751          public Builder mergeJid(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto value) {
11752            if (jidBuilder_ == null) {
11753              if (((bitField0_ & 0x00000001) == 0x00000001) &&
11754                  jid_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance()) {
11755                jid_ =
11756                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.newBuilder(jid_).mergeFrom(value).buildPartial();
11757              } else {
11758                jid_ = value;
11759              }
11760              onChanged();
11761            } else {
11762              jidBuilder_.mergeFrom(value);
11763            }
11764            bitField0_ |= 0x00000001;
11765            return this;
11766          }
11767          /**
11768           * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
11769           */
11770          public Builder clearJid() {
11771            if (jidBuilder_ == null) {
11772              jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
11773              onChanged();
11774            } else {
11775              jidBuilder_.clear();
11776            }
11777            bitField0_ = (bitField0_ & ~0x00000001);
11778            return this;
11779          }
11780          /**
11781           * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
11782           */
11783          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder getJidBuilder() {
11784            bitField0_ |= 0x00000001;
11785            onChanged();
11786            return getJidFieldBuilder().getBuilder();
11787          }
11788          /**
11789           * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
11790           */
11791          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder() {
11792            if (jidBuilder_ != null) {
11793              return jidBuilder_.getMessageOrBuilder();
11794            } else {
11795              return jid_;
11796            }
11797          }
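      // The SingleFieldBuilder takes ownership of jid_ on first use (note
      // jid_ = null below); from then on all reads and writes of the field
      // flow through jidBuilder_, which propagates change notifications to
      // the parent builder via getParentForChildren().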
11798          /**
11799           * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
11800           */
11801          private com.google.protobuf.SingleFieldBuilder<
11802              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder> 
11803              getJidFieldBuilder() {
11804            if (jidBuilder_ == null) {
11805              jidBuilder_ = new com.google.protobuf.SingleFieldBuilder<
11806                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder>(
11807                      jid_,
11808                      getParentForChildren(),
11809                      isClean());
11810              jid_ = null;
11811            }
11812            return jidBuilder_;
11813          }
11814    
11815          // @@protoc_insertion_point(builder_scope:hadoop.hdfs.qjournal.DoPreUpgradeRequestProto)
11816        }
11817    
11818        static {
11819          defaultInstance = new DoPreUpgradeRequestProto(true);
11820          defaultInstance.initFields();
11821        }
11822    
11823        // @@protoc_insertion_point(class_scope:hadoop.hdfs.qjournal.DoPreUpgradeRequestProto)
11824      }
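
  // Illustrative usage (a minimal sketch, not part of the generated code):
  // editing the nested jid in place through its sub-builder. The journal
  // name below is hypothetical.
  //
  //   QJournalProtocolProtos.DoPreUpgradeRequestProto.Builder b =
  //       QJournalProtocolProtos.DoPreUpgradeRequestProto.newBuilder();
  //   b.getJidBuilder().setIdentifier("journal-a");  // marks jid as set
  //   QJournalProtocolProtos.DoPreUpgradeRequestProto req = b.build();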
11825    
11826      public interface DoPreUpgradeResponseProtoOrBuilder
11827          extends com.google.protobuf.MessageOrBuilder {
11828      }
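  // DoPreUpgradeResponseProto declares no fields; it exists so that the
  // doPreUpgrade() RPC has a concrete, ack-only response type.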
11829      /**
11830       * Protobuf type {@code hadoop.hdfs.qjournal.DoPreUpgradeResponseProto}
11831       */
11832      public static final class DoPreUpgradeResponseProto extends
11833          com.google.protobuf.GeneratedMessage
11834          implements DoPreUpgradeResponseProtoOrBuilder {
11835        // Use DoPreUpgradeResponseProto.newBuilder() to construct.
11836        private DoPreUpgradeResponseProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
11837          super(builder);
11838          this.unknownFields = builder.getUnknownFields();
11839        }
11840        private DoPreUpgradeResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
11841    
11842        private static final DoPreUpgradeResponseProto defaultInstance;
11843        public static DoPreUpgradeResponseProto getDefaultInstance() {
11844          return defaultInstance;
11845        }
11846    
11847        public DoPreUpgradeResponseProto getDefaultInstanceForType() {
11848          return defaultInstance;
11849        }
11850    
11851        private final com.google.protobuf.UnknownFieldSet unknownFields;
11852        @java.lang.Override
11853        public final com.google.protobuf.UnknownFieldSet
11854            getUnknownFields() {
11855          return this.unknownFields;
11856        }
11857        private DoPreUpgradeResponseProto(
11858            com.google.protobuf.CodedInputStream input,
11859            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
11860            throws com.google.protobuf.InvalidProtocolBufferException {
11861          initFields();
11862          com.google.protobuf.UnknownFieldSet.Builder unknownFields =
11863              com.google.protobuf.UnknownFieldSet.newBuilder();
11864          try {
11865            boolean done = false;
11866            while (!done) {
11867              int tag = input.readTag();
11868              switch (tag) {
11869                case 0:
11870                  done = true;
11871                  break;
11872                default: {
11873                  if (!parseUnknownField(input, unknownFields,
11874                                         extensionRegistry, tag)) {
11875                    done = true;
11876                  }
11877                  break;
11878                }
11879              }
11880            }
11881          } catch (com.google.protobuf.InvalidProtocolBufferException e) {
11882            throw e.setUnfinishedMessage(this);
11883          } catch (java.io.IOException e) {
11884            throw new com.google.protobuf.InvalidProtocolBufferException(
11885                e.getMessage()).setUnfinishedMessage(this);
11886          } finally {
11887            this.unknownFields = unknownFields.build();
11888            makeExtensionsImmutable();
11889          }
11890        }
11891        public static final com.google.protobuf.Descriptors.Descriptor
11892            getDescriptor() {
11893          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_DoPreUpgradeResponseProto_descriptor;
11894        }
11895    
11896        protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
11897            internalGetFieldAccessorTable() {
11898          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_DoPreUpgradeResponseProto_fieldAccessorTable
11899              .ensureFieldAccessorsInitialized(
11900                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeResponseProto.Builder.class);
11901        }
11902    
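    // Stateless parser singleton: every static parseFrom/parseDelimitedFrom
    // overload below delegates to this object.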
11903        public static com.google.protobuf.Parser<DoPreUpgradeResponseProto> PARSER =
11904            new com.google.protobuf.AbstractParser<DoPreUpgradeResponseProto>() {
11905          public DoPreUpgradeResponseProto parsePartialFrom(
11906              com.google.protobuf.CodedInputStream input,
11907              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
11908              throws com.google.protobuf.InvalidProtocolBufferException {
11909            return new DoPreUpgradeResponseProto(input, extensionRegistry);
11910          }
11911        };
11912    
11913        @java.lang.Override
11914        public com.google.protobuf.Parser<DoPreUpgradeResponseProto> getParserForType() {
11915          return PARSER;
11916        }
11917    
11918        private void initFields() {
11919        }
11920        private byte memoizedIsInitialized = -1;
11921        public final boolean isInitialized() {
11922          byte isInitialized = memoizedIsInitialized;
11923          if (isInitialized != -1) return isInitialized == 1;
11924    
11925          memoizedIsInitialized = 1;
11926          return true;
11927        }
11928    
11929        public void writeTo(com.google.protobuf.CodedOutputStream output)
11930                            throws java.io.IOException {
11931          getSerializedSize();
11932          getUnknownFields().writeTo(output);
11933        }
11934    
11935        private int memoizedSerializedSize = -1;
11936        public int getSerializedSize() {
11937          int size = memoizedSerializedSize;
11938          if (size != -1) return size;
11939    
11940          size = 0;
11941          size += getUnknownFields().getSerializedSize();
11942          memoizedSerializedSize = size;
11943          return size;
11944        }
11945    
11946        private static final long serialVersionUID = 0L;
11947        @java.lang.Override
11948        protected java.lang.Object writeReplace()
11949            throws java.io.ObjectStreamException {
11950          return super.writeReplace();
11951        }
11952    
11953        @java.lang.Override
11954        public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
11958          if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeResponseProto)) {
11959            return super.equals(obj);
11960          }
11961          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeResponseProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeResponseProto) obj;
11962    
11963          boolean result = true;
11964          result = result &&
11965              getUnknownFields().equals(other.getUnknownFields());
11966          return result;
11967        }
11968    
11969        private int memoizedHashCode = 0;
11970        @java.lang.Override
11971        public int hashCode() {
11972          if (memoizedHashCode != 0) {
11973            return memoizedHashCode;
11974          }
11975          int hash = 41;
11976          hash = (19 * hash) + getDescriptorForType().hashCode();
11977          hash = (29 * hash) + getUnknownFields().hashCode();
11978          memoizedHashCode = hash;
11979          return hash;
11980        }
11981    
11982        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeResponseProto parseFrom(
11983            com.google.protobuf.ByteString data)
11984            throws com.google.protobuf.InvalidProtocolBufferException {
11985          return PARSER.parseFrom(data);
11986        }
11987        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeResponseProto parseFrom(
11988            com.google.protobuf.ByteString data,
11989            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
11990            throws com.google.protobuf.InvalidProtocolBufferException {
11991          return PARSER.parseFrom(data, extensionRegistry);
11992        }
11993        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeResponseProto parseFrom(byte[] data)
11994            throws com.google.protobuf.InvalidProtocolBufferException {
11995          return PARSER.parseFrom(data);
11996        }
11997        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeResponseProto parseFrom(
11998            byte[] data,
11999            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
12000            throws com.google.protobuf.InvalidProtocolBufferException {
12001          return PARSER.parseFrom(data, extensionRegistry);
12002        }
12003        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeResponseProto parseFrom(java.io.InputStream input)
12004            throws java.io.IOException {
12005          return PARSER.parseFrom(input);
12006        }
12007        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeResponseProto parseFrom(
12008            java.io.InputStream input,
12009            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
12010            throws java.io.IOException {
12011          return PARSER.parseFrom(input, extensionRegistry);
12012        }
12013        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeResponseProto parseDelimitedFrom(java.io.InputStream input)
12014            throws java.io.IOException {
12015          return PARSER.parseDelimitedFrom(input);
12016        }
12017        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeResponseProto parseDelimitedFrom(
12018            java.io.InputStream input,
12019            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
12020            throws java.io.IOException {
12021          return PARSER.parseDelimitedFrom(input, extensionRegistry);
12022        }
12023        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeResponseProto parseFrom(
12024            com.google.protobuf.CodedInputStream input)
12025            throws java.io.IOException {
12026          return PARSER.parseFrom(input);
12027        }
12028        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeResponseProto parseFrom(
12029            com.google.protobuf.CodedInputStream input,
12030            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
12031            throws java.io.IOException {
12032          return PARSER.parseFrom(input, extensionRegistry);
12033        }
12034    
12035        public static Builder newBuilder() { return Builder.create(); }
12036        public Builder newBuilderForType() { return newBuilder(); }
12037        public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeResponseProto prototype) {
12038          return newBuilder().mergeFrom(prototype);
12039        }
12040        public Builder toBuilder() { return newBuilder(this); }
12041    
12042        @java.lang.Override
12043        protected Builder newBuilderForType(
12044            com.google.protobuf.GeneratedMessage.BuilderParent parent) {
12045          Builder builder = new Builder(parent);
12046          return builder;
12047        }
12048        /**
12049         * Protobuf type {@code hadoop.hdfs.qjournal.DoPreUpgradeResponseProto}
12050         */
12051        public static final class Builder extends
12052            com.google.protobuf.GeneratedMessage.Builder<Builder>
12053           implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeResponseProtoOrBuilder {
12054          public static final com.google.protobuf.Descriptors.Descriptor
12055              getDescriptor() {
12056            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_DoPreUpgradeResponseProto_descriptor;
12057          }
12058    
12059          protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
12060              internalGetFieldAccessorTable() {
12061            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_DoPreUpgradeResponseProto_fieldAccessorTable
12062                .ensureFieldAccessorsInitialized(
12063                    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeResponseProto.Builder.class);
12064          }
12065    
12066          // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeResponseProto.newBuilder()
12067          private Builder() {
12068            maybeForceBuilderInitialization();
12069          }
12070    
12071          private Builder(
12072              com.google.protobuf.GeneratedMessage.BuilderParent parent) {
12073            super(parent);
12074            maybeForceBuilderInitialization();
12075          }
      private void maybeForceBuilderInitialization() {
        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
          // No message-typed fields here, so there are no nested field
          // builders to pre-create.
        }
      }
12080          private static Builder create() {
12081            return new Builder();
12082          }
12083    
12084          public Builder clear() {
12085            super.clear();
12086            return this;
12087          }
12088    
12089          public Builder clone() {
12090            return create().mergeFrom(buildPartial());
12091          }
12092    
12093          public com.google.protobuf.Descriptors.Descriptor
12094              getDescriptorForType() {
12095            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_DoPreUpgradeResponseProto_descriptor;
12096          }
12097    
12098          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeResponseProto getDefaultInstanceForType() {
12099            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeResponseProto.getDefaultInstance();
12100          }
12101    
12102          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeResponseProto build() {
12103            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeResponseProto result = buildPartial();
12104            if (!result.isInitialized()) {
12105              throw newUninitializedMessageException(result);
12106            }
12107            return result;
12108          }
12109    
12110          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeResponseProto buildPartial() {
12111            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeResponseProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeResponseProto(this);
12112            onBuilt();
12113            return result;
12114          }
12115    
12116          public Builder mergeFrom(com.google.protobuf.Message other) {
12117            if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeResponseProto) {
12118              return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeResponseProto)other);
12119            } else {
12120              super.mergeFrom(other);
12121              return this;
12122            }
12123          }
12124    
12125          public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeResponseProto other) {
12126            if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeResponseProto.getDefaultInstance()) return this;
12127            this.mergeUnknownFields(other.getUnknownFields());
12128            return this;
12129          }
12130    
12131          public final boolean isInitialized() {
12132            return true;
12133          }
12134    
12135          public Builder mergeFrom(
12136              com.google.protobuf.CodedInputStream input,
12137              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
12138              throws java.io.IOException {
12139            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeResponseProto parsedMessage = null;
12140            try {
12141              parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
12142            } catch (com.google.protobuf.InvalidProtocolBufferException e) {
12143              parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeResponseProto) e.getUnfinishedMessage();
12144              throw e;
12145            } finally {
12146              if (parsedMessage != null) {
12147                mergeFrom(parsedMessage);
12148              }
12149            }
12150            return this;
12151          }
12152    
12153          // @@protoc_insertion_point(builder_scope:hadoop.hdfs.qjournal.DoPreUpgradeResponseProto)
12154        }
12155    
12156        static {
12157          defaultInstance = new DoPreUpgradeResponseProto(true);
12158          defaultInstance.initFields();
12159        }
12160    
12161        // @@protoc_insertion_point(class_scope:hadoop.hdfs.qjournal.DoPreUpgradeResponseProto)
12162      }
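
  // Editor's sketch (added by hand, not protoc output): DoPreUpgradeResponseProto
  // declares no fields, so a response is effectively a bare acknowledgement.
  // The helper below is illustrative only; it shows that the default instance
  // encodes to zero bytes and that parsing an empty payload succeeds.
  static void demoEmptyDoPreUpgradeResponse()
      throws com.google.protobuf.InvalidProtocolBufferException {
    DoPreUpgradeResponseProto response =
        DoPreUpgradeResponseProto.newBuilder().build();
    // No fields means an empty wire encoding.
    assert response.toByteArray().length == 0;
    // Round-trip: an empty payload parses to an initialized message.
    DoPreUpgradeResponseProto parsed =
        DoPreUpgradeResponseProto.parseFrom(response.toByteArray());
    assert parsed.isInitialized();
  }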
12163    
12164      public interface DoUpgradeRequestProtoOrBuilder
12165          extends com.google.protobuf.MessageOrBuilder {
12166    
12167        // required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;
12168        /**
12169         * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
12170         */
12171        boolean hasJid();
12172        /**
12173         * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
12174         */
12175        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid();
12176        /**
12177         * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
12178         */
12179        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder();
12180    
12181        // required .hadoop.hdfs.StorageInfoProto sInfo = 2;
12182        /**
12183         * <code>required .hadoop.hdfs.StorageInfoProto sInfo = 2;</code>
12184         */
12185        boolean hasSInfo();
12186        /**
12187         * <code>required .hadoop.hdfs.StorageInfoProto sInfo = 2;</code>
12188         */
12189        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto getSInfo();
12190        /**
12191         * <code>required .hadoop.hdfs.StorageInfoProto sInfo = 2;</code>
12192         */
12193        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProtoOrBuilder getSInfoOrBuilder();
12194      }
  /**
   * Protobuf type {@code hadoop.hdfs.qjournal.DoUpgradeRequestProto}
   *
   * <pre>
   * doUpgrade()
   * </pre>
   */
12203      public static final class DoUpgradeRequestProto extends
12204          com.google.protobuf.GeneratedMessage
12205          implements DoUpgradeRequestProtoOrBuilder {
12206        // Use DoUpgradeRequestProto.newBuilder() to construct.
12207        private DoUpgradeRequestProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
12208          super(builder);
12209          this.unknownFields = builder.getUnknownFields();
12210        }
12211        private DoUpgradeRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
12212    
12213        private static final DoUpgradeRequestProto defaultInstance;
12214        public static DoUpgradeRequestProto getDefaultInstance() {
12215          return defaultInstance;
12216        }
12217    
12218        public DoUpgradeRequestProto getDefaultInstanceForType() {
12219          return defaultInstance;
12220        }
12221    
12222        private final com.google.protobuf.UnknownFieldSet unknownFields;
12223        @java.lang.Override
12224        public final com.google.protobuf.UnknownFieldSet
12225            getUnknownFields() {
12226          return this.unknownFields;
12227        }
    // Parsing constructor: consumes the stream until end-of-input (tag 0),
    // dispatching known tags and preserving everything else in the
    // unknown-field set.
    private DoUpgradeRequestProto(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      initFields();
      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
          com.google.protobuf.UnknownFieldSet.newBuilder();
      try {
        boolean done = false;
        while (!done) {
          int tag = input.readTag();
          switch (tag) {
            case 0:
              done = true;
              break;
            case 10: {
              // Field 1 (jid), wire type 2: merge into any previously read value.
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder subBuilder = null;
              if (((bitField0_ & 0x00000001) == 0x00000001)) {
                subBuilder = jid_.toBuilder();
              }
              jid_ = input.readMessage(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.PARSER, extensionRegistry);
              if (subBuilder != null) {
                subBuilder.mergeFrom(jid_);
                jid_ = subBuilder.buildPartial();
              }
              bitField0_ |= 0x00000001;
              break;
            }
            case 18: {
              // Field 2 (sInfo), wire type 2: merge into any previously read value.
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.Builder subBuilder = null;
              if (((bitField0_ & 0x00000002) == 0x00000002)) {
                subBuilder = sInfo_.toBuilder();
              }
              sInfo_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.PARSER, extensionRegistry);
              if (subBuilder != null) {
                subBuilder.mergeFrom(sInfo_);
                sInfo_ = subBuilder.buildPartial();
              }
              bitField0_ |= 0x00000002;
              break;
            }
            default: {
              if (!parseUnknownField(input, unknownFields,
                                     extensionRegistry, tag)) {
                done = true;
              }
              break;
            }
          }
        }
      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
        throw e.setUnfinishedMessage(this);
      } catch (java.io.IOException e) {
        throw new com.google.protobuf.InvalidProtocolBufferException(
            e.getMessage()).setUnfinishedMessage(this);
      } finally {
        this.unknownFields = unknownFields.build();
        makeExtensionsImmutable();
      }
    }
12289        public static final com.google.protobuf.Descriptors.Descriptor
12290            getDescriptor() {
12291          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_DoUpgradeRequestProto_descriptor;
12292        }
12293    
12294        protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
12295            internalGetFieldAccessorTable() {
12296          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_DoUpgradeRequestProto_fieldAccessorTable
12297              .ensureFieldAccessorsInitialized(
12298                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeRequestProto.Builder.class);
12299        }
12300    
12301        public static com.google.protobuf.Parser<DoUpgradeRequestProto> PARSER =
12302            new com.google.protobuf.AbstractParser<DoUpgradeRequestProto>() {
12303          public DoUpgradeRequestProto parsePartialFrom(
12304              com.google.protobuf.CodedInputStream input,
12305              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
12306              throws com.google.protobuf.InvalidProtocolBufferException {
12307            return new DoUpgradeRequestProto(input, extensionRegistry);
12308          }
12309        };
12310    
12311        @java.lang.Override
12312        public com.google.protobuf.Parser<DoUpgradeRequestProto> getParserForType() {
12313          return PARSER;
12314        }
12315    
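    // bitField0_ tracks which fields have been explicitly set:
    // bit 0x00000001 = jid, bit 0x00000002 = sInfo.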
12316        private int bitField0_;
12317        // required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;
12318        public static final int JID_FIELD_NUMBER = 1;
12319        private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto jid_;
12320        /**
12321         * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
12322         */
12323        public boolean hasJid() {
12324          return ((bitField0_ & 0x00000001) == 0x00000001);
12325        }
12326        /**
12327         * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
12328         */
12329        public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid() {
12330          return jid_;
12331        }
12332        /**
12333         * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
12334         */
12335        public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder() {
12336          return jid_;
12337        }
12338    
12339        // required .hadoop.hdfs.StorageInfoProto sInfo = 2;
12340        public static final int SINFO_FIELD_NUMBER = 2;
12341        private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto sInfo_;
12342        /**
12343         * <code>required .hadoop.hdfs.StorageInfoProto sInfo = 2;</code>
12344         */
12345        public boolean hasSInfo() {
12346          return ((bitField0_ & 0x00000002) == 0x00000002);
12347        }
12348        /**
12349         * <code>required .hadoop.hdfs.StorageInfoProto sInfo = 2;</code>
12350         */
12351        public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto getSInfo() {
12352          return sInfo_;
12353        }
12354        /**
12355         * <code>required .hadoop.hdfs.StorageInfoProto sInfo = 2;</code>
12356         */
12357        public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProtoOrBuilder getSInfoOrBuilder() {
12358          return sInfo_;
12359        }
12360    
12361        private void initFields() {
12362          jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
12363          sInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.getDefaultInstance();
12364        }
    private byte memoizedIsInitialized = -1;
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized != -1) return isInitialized == 1;

      // Both fields are required, and each is itself a message with required
      // fields, so initialization is checked recursively.
      if (!hasJid()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasSInfo()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!getJid().isInitialized()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!getSInfo().isInitialized()) {
        memoizedIsInitialized = 0;
        return false;
      }
      memoizedIsInitialized = 1;
      return true;
    }
12389    
12390        public void writeTo(com.google.protobuf.CodedOutputStream output)
12391                            throws java.io.IOException {
12392          getSerializedSize();
12393          if (((bitField0_ & 0x00000001) == 0x00000001)) {
12394            output.writeMessage(1, jid_);
12395          }
12396          if (((bitField0_ & 0x00000002) == 0x00000002)) {
12397            output.writeMessage(2, sInfo_);
12398          }
12399          getUnknownFields().writeTo(output);
12400        }
12401    
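    // getSerializedSize() memoizes its result; this is safe because the
    // message is immutable once constructed. writeTo() above calls it first
    // so the cached size is available when emitting nested fields.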
12402        private int memoizedSerializedSize = -1;
12403        public int getSerializedSize() {
12404          int size = memoizedSerializedSize;
12405          if (size != -1) return size;
12406    
12407          size = 0;
12408          if (((bitField0_ & 0x00000001) == 0x00000001)) {
12409            size += com.google.protobuf.CodedOutputStream
12410              .computeMessageSize(1, jid_);
12411          }
12412          if (((bitField0_ & 0x00000002) == 0x00000002)) {
12413            size += com.google.protobuf.CodedOutputStream
12414              .computeMessageSize(2, sInfo_);
12415          }
12416          size += getUnknownFields().getSerializedSize();
12417          memoizedSerializedSize = size;
12418          return size;
12419        }
12420    
12421        private static final long serialVersionUID = 0L;
12422        @java.lang.Override
12423        protected java.lang.Object writeReplace()
12424            throws java.io.ObjectStreamException {
12425          return super.writeReplace();
12426        }
12427    
12428        @java.lang.Override
12429        public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
12433          if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeRequestProto)) {
12434            return super.equals(obj);
12435          }
12436          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeRequestProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeRequestProto) obj;
12437    
12438          boolean result = true;
12439          result = result && (hasJid() == other.hasJid());
12440          if (hasJid()) {
12441            result = result && getJid()
12442                .equals(other.getJid());
12443          }
12444          result = result && (hasSInfo() == other.hasSInfo());
12445          if (hasSInfo()) {
12446            result = result && getSInfo()
12447                .equals(other.getSInfo());
12448          }
12449          result = result &&
12450              getUnknownFields().equals(other.getUnknownFields());
12451          return result;
12452        }
12453    
12454        private int memoizedHashCode = 0;
12455        @java.lang.Override
12456        public int hashCode() {
12457          if (memoizedHashCode != 0) {
12458            return memoizedHashCode;
12459          }
12460          int hash = 41;
12461          hash = (19 * hash) + getDescriptorForType().hashCode();
12462          if (hasJid()) {
12463            hash = (37 * hash) + JID_FIELD_NUMBER;
12464            hash = (53 * hash) + getJid().hashCode();
12465          }
12466          if (hasSInfo()) {
12467            hash = (37 * hash) + SINFO_FIELD_NUMBER;
12468            hash = (53 * hash) + getSInfo().hashCode();
12469          }
12470          hash = (29 * hash) + getUnknownFields().hashCode();
12471          memoizedHashCode = hash;
12472          return hash;
12473        }
12474    
12475        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeRequestProto parseFrom(
12476            com.google.protobuf.ByteString data)
12477            throws com.google.protobuf.InvalidProtocolBufferException {
12478          return PARSER.parseFrom(data);
12479        }
12480        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeRequestProto parseFrom(
12481            com.google.protobuf.ByteString data,
12482            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
12483            throws com.google.protobuf.InvalidProtocolBufferException {
12484          return PARSER.parseFrom(data, extensionRegistry);
12485        }
12486        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeRequestProto parseFrom(byte[] data)
12487            throws com.google.protobuf.InvalidProtocolBufferException {
12488          return PARSER.parseFrom(data);
12489        }
12490        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeRequestProto parseFrom(
12491            byte[] data,
12492            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
12493            throws com.google.protobuf.InvalidProtocolBufferException {
12494          return PARSER.parseFrom(data, extensionRegistry);
12495        }
12496        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeRequestProto parseFrom(java.io.InputStream input)
12497            throws java.io.IOException {
12498          return PARSER.parseFrom(input);
12499        }
12500        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeRequestProto parseFrom(
12501            java.io.InputStream input,
12502            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
12503            throws java.io.IOException {
12504          return PARSER.parseFrom(input, extensionRegistry);
12505        }
12506        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeRequestProto parseDelimitedFrom(java.io.InputStream input)
12507            throws java.io.IOException {
12508          return PARSER.parseDelimitedFrom(input);
12509        }
12510        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeRequestProto parseDelimitedFrom(
12511            java.io.InputStream input,
12512            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
12513            throws java.io.IOException {
12514          return PARSER.parseDelimitedFrom(input, extensionRegistry);
12515        }
12516        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeRequestProto parseFrom(
12517            com.google.protobuf.CodedInputStream input)
12518            throws java.io.IOException {
12519          return PARSER.parseFrom(input);
12520        }
12521        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeRequestProto parseFrom(
12522            com.google.protobuf.CodedInputStream input,
12523            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
12524            throws java.io.IOException {
12525          return PARSER.parseFrom(input, extensionRegistry);
12526        }
12527    
12528        public static Builder newBuilder() { return Builder.create(); }
12529        public Builder newBuilderForType() { return newBuilder(); }
12530        public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeRequestProto prototype) {
12531          return newBuilder().mergeFrom(prototype);
12532        }
12533        public Builder toBuilder() { return newBuilder(this); }
12534    
12535        @java.lang.Override
12536        protected Builder newBuilderForType(
12537            com.google.protobuf.GeneratedMessage.BuilderParent parent) {
12538          Builder builder = new Builder(parent);
12539          return builder;
12540        }
    /**
     * Protobuf type {@code hadoop.hdfs.qjournal.DoUpgradeRequestProto}
     *
     * <pre>
     * doUpgrade()
     * </pre>
     */
12549        public static final class Builder extends
12550            com.google.protobuf.GeneratedMessage.Builder<Builder>
12551           implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeRequestProtoOrBuilder {
12552          public static final com.google.protobuf.Descriptors.Descriptor
12553              getDescriptor() {
12554            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_DoUpgradeRequestProto_descriptor;
12555          }
12556    
12557          protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
12558              internalGetFieldAccessorTable() {
12559            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_DoUpgradeRequestProto_fieldAccessorTable
12560                .ensureFieldAccessorsInitialized(
12561                    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeRequestProto.Builder.class);
12562          }
12563    
12564          // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeRequestProto.newBuilder()
12565          private Builder() {
12566            maybeForceBuilderInitialization();
12567          }
12568    
12569          private Builder(
12570              com.google.protobuf.GeneratedMessage.BuilderParent parent) {
12571            super(parent);
12572            maybeForceBuilderInitialization();
12573          }
12574          private void maybeForceBuilderInitialization() {
12575            if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
12576              getJidFieldBuilder();
12577              getSInfoFieldBuilder();
12578            }
12579          }
12580          private static Builder create() {
12581            return new Builder();
12582          }
12583    
12584          public Builder clear() {
12585            super.clear();
12586            if (jidBuilder_ == null) {
12587              jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
12588            } else {
12589              jidBuilder_.clear();
12590            }
12591            bitField0_ = (bitField0_ & ~0x00000001);
12592            if (sInfoBuilder_ == null) {
12593              sInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.getDefaultInstance();
12594            } else {
12595              sInfoBuilder_.clear();
12596            }
12597            bitField0_ = (bitField0_ & ~0x00000002);
12598            return this;
12599          }
12600    
12601          public Builder clone() {
12602            return create().mergeFrom(buildPartial());
12603          }
12604    
12605          public com.google.protobuf.Descriptors.Descriptor
12606              getDescriptorForType() {
12607            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_DoUpgradeRequestProto_descriptor;
12608          }
12609    
12610          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeRequestProto getDefaultInstanceForType() {
12611            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeRequestProto.getDefaultInstance();
12612          }
12613    
12614          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeRequestProto build() {
12615            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeRequestProto result = buildPartial();
12616            if (!result.isInitialized()) {
12617              throw newUninitializedMessageException(result);
12618            }
12619            return result;
12620          }
12621    
12622          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeRequestProto buildPartial() {
12623            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeRequestProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeRequestProto(this);
12624            int from_bitField0_ = bitField0_;
12625            int to_bitField0_ = 0;
12626            if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
12627              to_bitField0_ |= 0x00000001;
12628            }
12629            if (jidBuilder_ == null) {
12630              result.jid_ = jid_;
12631            } else {
12632              result.jid_ = jidBuilder_.build();
12633            }
12634            if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
12635              to_bitField0_ |= 0x00000002;
12636            }
12637            if (sInfoBuilder_ == null) {
12638              result.sInfo_ = sInfo_;
12639            } else {
12640              result.sInfo_ = sInfoBuilder_.build();
12641            }
12642            result.bitField0_ = to_bitField0_;
12643            onBuilt();
12644            return result;
12645          }
12646    
12647          public Builder mergeFrom(com.google.protobuf.Message other) {
12648            if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeRequestProto) {
12649              return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeRequestProto)other);
12650            } else {
12651              super.mergeFrom(other);
12652              return this;
12653            }
12654          }
12655    
      public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeRequestProto other) {
        if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeRequestProto.getDefaultInstance()) return this;
        // Singular message fields are merged field-by-field, not overwritten.
        if (other.hasJid()) {
          mergeJid(other.getJid());
        }
        if (other.hasSInfo()) {
          mergeSInfo(other.getSInfo());
        }
        this.mergeUnknownFields(other.getUnknownFields());
        return this;
      }

      public final boolean isInitialized() {
        if (!hasJid()) {
          // Required field jid is not set.
          return false;
        }
        if (!hasSInfo()) {
          // Required field sInfo is not set.
          return false;
        }
        if (!getJid().isInitialized()) {
          // jid is set but its own required fields are incomplete.
          return false;
        }
        if (!getSInfo().isInitialized()) {
          // sInfo is set but its own required fields are incomplete.
          return false;
        }
        return true;
      }
12687    
12688          public Builder mergeFrom(
12689              com.google.protobuf.CodedInputStream input,
12690              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
12691              throws java.io.IOException {
12692            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeRequestProto parsedMessage = null;
12693            try {
12694              parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
12695            } catch (com.google.protobuf.InvalidProtocolBufferException e) {
12696              parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeRequestProto) e.getUnfinishedMessage();
12697              throw e;
12698            } finally {
12699              if (parsedMessage != null) {
12700                mergeFrom(parsedMessage);
12701              }
12702            }
12703            return this;
12704          }
12705          private int bitField0_;
12706    
12707          // required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;
12708          private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
12709          private com.google.protobuf.SingleFieldBuilder<
12710              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder> jidBuilder_;
12711          /**
12712           * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
12713           */
12714          public boolean hasJid() {
12715            return ((bitField0_ & 0x00000001) == 0x00000001);
12716          }
12717          /**
12718           * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
12719           */
12720          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid() {
12721            if (jidBuilder_ == null) {
12722              return jid_;
12723            } else {
12724              return jidBuilder_.getMessage();
12725            }
12726          }
12727          /**
12728           * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
12729           */
12730          public Builder setJid(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto value) {
12731            if (jidBuilder_ == null) {
12732              if (value == null) {
12733                throw new NullPointerException();
12734              }
12735              jid_ = value;
12736              onChanged();
12737            } else {
12738              jidBuilder_.setMessage(value);
12739            }
12740            bitField0_ |= 0x00000001;
12741            return this;
12742          }
12743          /**
12744           * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
12745           */
12746          public Builder setJid(
12747              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder builderForValue) {
12748            if (jidBuilder_ == null) {
12749              jid_ = builderForValue.build();
12750              onChanged();
12751            } else {
12752              jidBuilder_.setMessage(builderForValue.build());
12753            }
12754            bitField0_ |= 0x00000001;
12755            return this;
12756          }
12757          /**
12758           * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
12759           */
12760          public Builder mergeJid(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto value) {
12761            if (jidBuilder_ == null) {
12762              if (((bitField0_ & 0x00000001) == 0x00000001) &&
12763                  jid_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance()) {
12764                jid_ =
12765                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.newBuilder(jid_).mergeFrom(value).buildPartial();
12766              } else {
12767                jid_ = value;
12768              }
12769              onChanged();
12770            } else {
12771              jidBuilder_.mergeFrom(value);
12772            }
12773            bitField0_ |= 0x00000001;
12774            return this;
12775          }
12776          /**
12777           * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
12778           */
12779          public Builder clearJid() {
12780            if (jidBuilder_ == null) {
12781              jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
12782              onChanged();
12783            } else {
12784              jidBuilder_.clear();
12785            }
12786            bitField0_ = (bitField0_ & ~0x00000001);
12787            return this;
12788          }
12789          /**
12790           * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
12791           */
12792          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder getJidBuilder() {
12793            bitField0_ |= 0x00000001;
12794            onChanged();
12795            return getJidFieldBuilder().getBuilder();
12796          }
12797          /**
12798           * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
12799           */
12800          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder() {
12801            if (jidBuilder_ != null) {
12802              return jidBuilder_.getMessageOrBuilder();
12803            } else {
12804              return jid_;
12805            }
12806          }
12807          /**
12808           * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
12809           */
12810          private com.google.protobuf.SingleFieldBuilder<
12811              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder> 
12812              getJidFieldBuilder() {
12813            if (jidBuilder_ == null) {
12814              jidBuilder_ = new com.google.protobuf.SingleFieldBuilder<
12815                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder>(
12816                      jid_,
12817                      getParentForChildren(),
12818                      isClean());
12819              jid_ = null;
12820            }
12821            return jidBuilder_;
12822          }
12823    
12824          // required .hadoop.hdfs.StorageInfoProto sInfo = 2;
12825          private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto sInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.getDefaultInstance();
12826          private com.google.protobuf.SingleFieldBuilder<
12827              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProtoOrBuilder> sInfoBuilder_;
12828          /**
12829           * <code>required .hadoop.hdfs.StorageInfoProto sInfo = 2;</code>
12830           */
12831          public boolean hasSInfo() {
12832            return ((bitField0_ & 0x00000002) == 0x00000002);
12833          }
12834          /**
12835           * <code>required .hadoop.hdfs.StorageInfoProto sInfo = 2;</code>
12836           */
12837          public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto getSInfo() {
12838            if (sInfoBuilder_ == null) {
12839              return sInfo_;
12840            } else {
12841              return sInfoBuilder_.getMessage();
12842            }
12843          }
12844          /**
12845           * <code>required .hadoop.hdfs.StorageInfoProto sInfo = 2;</code>
12846           */
12847          public Builder setSInfo(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto value) {
12848            if (sInfoBuilder_ == null) {
12849              if (value == null) {
12850                throw new NullPointerException();
12851              }
12852              sInfo_ = value;
12853              onChanged();
12854            } else {
12855              sInfoBuilder_.setMessage(value);
12856            }
12857            bitField0_ |= 0x00000002;
12858            return this;
12859          }
12860          /**
12861           * <code>required .hadoop.hdfs.StorageInfoProto sInfo = 2;</code>
12862           */
12863          public Builder setSInfo(
12864              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.Builder builderForValue) {
12865            if (sInfoBuilder_ == null) {
12866              sInfo_ = builderForValue.build();
12867              onChanged();
12868            } else {
12869              sInfoBuilder_.setMessage(builderForValue.build());
12870            }
12871            bitField0_ |= 0x00000002;
12872            return this;
12873          }
12874          /**
12875           * <code>required .hadoop.hdfs.StorageInfoProto sInfo = 2;</code>
12876           */
12877          public Builder mergeSInfo(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto value) {
12878            if (sInfoBuilder_ == null) {
12879              if (((bitField0_ & 0x00000002) == 0x00000002) &&
12880                  sInfo_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.getDefaultInstance()) {
12881                sInfo_ =
12882                  org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.newBuilder(sInfo_).mergeFrom(value).buildPartial();
12883              } else {
12884                sInfo_ = value;
12885              }
12886              onChanged();
12887            } else {
12888              sInfoBuilder_.mergeFrom(value);
12889            }
12890            bitField0_ |= 0x00000002;
12891            return this;
12892          }
12893          /**
12894           * <code>required .hadoop.hdfs.StorageInfoProto sInfo = 2;</code>
12895           */
12896          public Builder clearSInfo() {
12897            if (sInfoBuilder_ == null) {
12898              sInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.getDefaultInstance();
12899              onChanged();
12900            } else {
12901              sInfoBuilder_.clear();
12902            }
12903            bitField0_ = (bitField0_ & ~0x00000002);
12904            return this;
12905          }
12906          /**
12907           * <code>required .hadoop.hdfs.StorageInfoProto sInfo = 2;</code>
12908           */
12909          public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.Builder getSInfoBuilder() {
12910            bitField0_ |= 0x00000002;
12911            onChanged();
12912            return getSInfoFieldBuilder().getBuilder();
12913          }
12914          /**
12915           * <code>required .hadoop.hdfs.StorageInfoProto sInfo = 2;</code>
12916           */
12917          public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProtoOrBuilder getSInfoOrBuilder() {
12918            if (sInfoBuilder_ != null) {
12919              return sInfoBuilder_.getMessageOrBuilder();
12920            } else {
12921              return sInfo_;
12922            }
12923          }
12924          /**
12925           * <code>required .hadoop.hdfs.StorageInfoProto sInfo = 2;</code>
12926           */
12927          private com.google.protobuf.SingleFieldBuilder<
12928              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProtoOrBuilder> 
12929              getSInfoFieldBuilder() {
12930            if (sInfoBuilder_ == null) {
12931              sInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder<
12932                  org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProtoOrBuilder>(
12933                      sInfo_,
12934                      getParentForChildren(),
12935                      isClean());
12936              sInfo_ = null;
12937            }
12938            return sInfoBuilder_;
12939          }
12940    
12941          // @@protoc_insertion_point(builder_scope:hadoop.hdfs.qjournal.DoUpgradeRequestProto)
12942        }
12943    
12944        static {
12945          defaultInstance = new DoUpgradeRequestProto(true);
12946          defaultInstance.initFields();
12947        }
12948    
12949        // @@protoc_insertion_point(class_scope:hadoop.hdfs.qjournal.DoUpgradeRequestProto)
12950      }
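
  // Editor's sketch (added by hand, not protoc output): building and
  // round-tripping a DoUpgradeRequestProto. The identifier and StorageInfoProto
  // values are placeholders, and StorageInfoProto's required fields
  // (layoutVersion, namespceID, clusterID, cTime) are assumed from hdfs.proto.
  // build() throws if any required field is unset; use buildPartial() to
  // construct deliberately incomplete messages.
  static DoUpgradeRequestProto demoBuildDoUpgradeRequest()
      throws com.google.protobuf.InvalidProtocolBufferException {
    DoUpgradeRequestProto request = DoUpgradeRequestProto.newBuilder()
        // Nested message fields accept either a built message or its Builder.
        .setJid(JournalIdProto.newBuilder().setIdentifier("myjournal"))
        .setSInfo(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos
            .StorageInfoProto.newBuilder()
            .setLayoutVersion(1)
            .setNamespceID(42)
            .setClusterID("CID-example")
            .setCTime(0L))
        .build();
    // Round-trip through the wire format.
    DoUpgradeRequestProto parsed =
        DoUpgradeRequestProto.parseFrom(request.toByteArray());
    assert parsed.getJid().getIdentifier().equals("myjournal");
    return parsed;
  }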
12951    
12952      public interface DoUpgradeResponseProtoOrBuilder
12953          extends com.google.protobuf.MessageOrBuilder {
12954      }
12955      /**
12956       * Protobuf type {@code hadoop.hdfs.qjournal.DoUpgradeResponseProto}
12957       */
12958      public static final class DoUpgradeResponseProto extends
12959          com.google.protobuf.GeneratedMessage
12960          implements DoUpgradeResponseProtoOrBuilder {
12961        // Use DoUpgradeResponseProto.newBuilder() to construct.
12962        private DoUpgradeResponseProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
12963          super(builder);
12964          this.unknownFields = builder.getUnknownFields();
12965        }
12966        private DoUpgradeResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
12967    
12968        private static final DoUpgradeResponseProto defaultInstance;
12969        public static DoUpgradeResponseProto getDefaultInstance() {
12970          return defaultInstance;
12971        }
12972    
12973        public DoUpgradeResponseProto getDefaultInstanceForType() {
12974          return defaultInstance;
12975        }
12976    
12977        private final com.google.protobuf.UnknownFieldSet unknownFields;
12978        @java.lang.Override
12979        public final com.google.protobuf.UnknownFieldSet
12980            getUnknownFields() {
12981          return this.unknownFields;
12982        }
12983        private DoUpgradeResponseProto(
12984            com.google.protobuf.CodedInputStream input,
12985            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
12986            throws com.google.protobuf.InvalidProtocolBufferException {
12987          initFields();
12988          com.google.protobuf.UnknownFieldSet.Builder unknownFields =
12989              com.google.protobuf.UnknownFieldSet.newBuilder();
12990          try {
12991            boolean done = false;
12992            while (!done) {
12993              int tag = input.readTag();
12994              switch (tag) {
12995                case 0:
12996                  done = true;
12997                  break;
12998                default: {
12999                  if (!parseUnknownField(input, unknownFields,
13000                                         extensionRegistry, tag)) {
13001                    done = true;
13002                  }
13003                  break;
13004                }
13005              }
13006            }
13007          } catch (com.google.protobuf.InvalidProtocolBufferException e) {
13008            throw e.setUnfinishedMessage(this);
13009          } catch (java.io.IOException e) {
13010            throw new com.google.protobuf.InvalidProtocolBufferException(
13011                e.getMessage()).setUnfinishedMessage(this);
13012          } finally {
13013            this.unknownFields = unknownFields.build();
13014            makeExtensionsImmutable();
13015          }
13016        }
13017        public static final com.google.protobuf.Descriptors.Descriptor
13018            getDescriptor() {
13019          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_DoUpgradeResponseProto_descriptor;
13020        }
13021    
13022        protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
13023            internalGetFieldAccessorTable() {
13024          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_DoUpgradeResponseProto_fieldAccessorTable
13025              .ensureFieldAccessorsInitialized(
13026                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeResponseProto.Builder.class);
13027        }
13028    
13029        public static com.google.protobuf.Parser<DoUpgradeResponseProto> PARSER =
13030            new com.google.protobuf.AbstractParser<DoUpgradeResponseProto>() {
13031          public DoUpgradeResponseProto parsePartialFrom(
13032              com.google.protobuf.CodedInputStream input,
13033              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
13034              throws com.google.protobuf.InvalidProtocolBufferException {
13035            return new DoUpgradeResponseProto(input, extensionRegistry);
13036          }
13037        };
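             // Editorial note (illustrative, not protoc output): PARSER is the
             // reflection-free entry point that the static parseFrom() overloads
             // below delegate to. Code holding raw bytes could call it directly,
             // e.g. (hypothetical variable names):
             //
             //   byte[] wire = ...;  // serialized DoUpgradeResponseProto bytes
             //   DoUpgradeResponseProto msg =
             //       DoUpgradeResponseProto.PARSER.parseFrom(wire);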
13038    
13039        @java.lang.Override
13040        public com.google.protobuf.Parser<DoUpgradeResponseProto> getParserForType() {
13041          return PARSER;
13042        }
13043    
13044        private void initFields() {
13045        }
13046        private byte memoizedIsInitialized = -1;
13047        public final boolean isInitialized() {
13048          byte isInitialized = memoizedIsInitialized;
13049          if (isInitialized != -1) return isInitialized == 1;
13050    
13051          memoizedIsInitialized = 1;
13052          return true;
13053        }
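             // Editorial note (not protoc output): with no required fields the
             // check above trivially succeeds; the byte cache encodes
             // -1 = not yet computed, 0 = false, 1 = true.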
13054    
13055        public void writeTo(com.google.protobuf.CodedOutputStream output)
13056                            throws java.io.IOException {
13057          getSerializedSize();
13058          getUnknownFields().writeTo(output);
13059        }
13060    
13061        private int memoizedSerializedSize = -1;
13062        public int getSerializedSize() {
13063          int size = memoizedSerializedSize;
13064          if (size != -1) return size;
13065    
13066          size = 0;
13067          size += getUnknownFields().getSerializedSize();
13068          memoizedSerializedSize = size;
13069          return size;
13070        }
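             // Editorial note (not protoc output): getSerializedSize() memoizes
             // its result (-1 means "not yet computed"), which is safe because a
             // built message is immutable; repeated calls return the cached size.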
13071    
13072        private static final long serialVersionUID = 0L;
13073        @java.lang.Override
13074        protected java.lang.Object writeReplace()
13075            throws java.io.ObjectStreamException {
13076          return super.writeReplace();
13077        }
13078    
13079        @java.lang.Override
13080        public boolean equals(final java.lang.Object obj) {
13081          if (obj == this) {
13082            return true;
13083          }
13084          if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeResponseProto)) {
13085            return super.equals(obj);
13086          }
13087          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeResponseProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeResponseProto) obj;
13088    
13089          boolean result = true;
13090          result = result &&
13091              getUnknownFields().equals(other.getUnknownFields());
13092          return result;
13093        }
13094    
13095        private int memoizedHashCode = 0;
13096        @java.lang.Override
13097        public int hashCode() {
13098          if (memoizedHashCode != 0) {
13099            return memoizedHashCode;
13100          }
13101          int hash = 41;
13102          hash = (19 * hash) + getDescriptorForType().hashCode();
13103          hash = (29 * hash) + getUnknownFields().hashCode();
13104          memoizedHashCode = hash;
13105          return hash;
13106        }
13107    
13108        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeResponseProto parseFrom(
13109            com.google.protobuf.ByteString data)
13110            throws com.google.protobuf.InvalidProtocolBufferException {
13111          return PARSER.parseFrom(data);
13112        }
13113        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeResponseProto parseFrom(
13114            com.google.protobuf.ByteString data,
13115            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
13116            throws com.google.protobuf.InvalidProtocolBufferException {
13117          return PARSER.parseFrom(data, extensionRegistry);
13118        }
13119        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeResponseProto parseFrom(byte[] data)
13120            throws com.google.protobuf.InvalidProtocolBufferException {
13121          return PARSER.parseFrom(data);
13122        }
13123        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeResponseProto parseFrom(
13124            byte[] data,
13125            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
13126            throws com.google.protobuf.InvalidProtocolBufferException {
13127          return PARSER.parseFrom(data, extensionRegistry);
13128        }
13129        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeResponseProto parseFrom(java.io.InputStream input)
13130            throws java.io.IOException {
13131          return PARSER.parseFrom(input);
13132        }
13133        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeResponseProto parseFrom(
13134            java.io.InputStream input,
13135            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
13136            throws java.io.IOException {
13137          return PARSER.parseFrom(input, extensionRegistry);
13138        }
13139        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeResponseProto parseDelimitedFrom(java.io.InputStream input)
13140            throws java.io.IOException {
13141          return PARSER.parseDelimitedFrom(input);
13142        }
13143        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeResponseProto parseDelimitedFrom(
13144            java.io.InputStream input,
13145            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
13146            throws java.io.IOException {
13147          return PARSER.parseDelimitedFrom(input, extensionRegistry);
13148        }
13149        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeResponseProto parseFrom(
13150            com.google.protobuf.CodedInputStream input)
13151            throws java.io.IOException {
13152          return PARSER.parseFrom(input);
13153        }
13154        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeResponseProto parseFrom(
13155            com.google.protobuf.CodedInputStream input,
13156            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
13157            throws java.io.IOException {
13158          return PARSER.parseFrom(input, extensionRegistry);
13159        }
13160    
13161        public static Builder newBuilder() { return Builder.create(); }
13162        public Builder newBuilderForType() { return newBuilder(); }
13163        public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeResponseProto prototype) {
13164          return newBuilder().mergeFrom(prototype);
13165        }
13166        public Builder toBuilder() { return newBuilder(this); }
13167    
13168        @java.lang.Override
13169        protected Builder newBuilderForType(
13170            com.google.protobuf.GeneratedMessage.BuilderParent parent) {
13171          Builder builder = new Builder(parent);
13172          return builder;
13173        }
13174        /**
13175         * Protobuf type {@code hadoop.hdfs.qjournal.DoUpgradeResponseProto}
13176         */
13177        public static final class Builder extends
13178            com.google.protobuf.GeneratedMessage.Builder<Builder>
13179           implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeResponseProtoOrBuilder {
13180          public static final com.google.protobuf.Descriptors.Descriptor
13181              getDescriptor() {
13182            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_DoUpgradeResponseProto_descriptor;
13183          }
13184    
13185          protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
13186              internalGetFieldAccessorTable() {
13187            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_DoUpgradeResponseProto_fieldAccessorTable
13188                .ensureFieldAccessorsInitialized(
13189                    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeResponseProto.Builder.class);
13190          }
13191    
13192          // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeResponseProto.newBuilder()
13193          private Builder() {
13194            maybeForceBuilderInitialization();
13195          }
13196    
13197          private Builder(
13198              com.google.protobuf.GeneratedMessage.BuilderParent parent) {
13199            super(parent);
13200            maybeForceBuilderInitialization();
13201          }
13202          private void maybeForceBuilderInitialization() {
13203            if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
13204            }
13205          }
13206          private static Builder create() {
13207            return new Builder();
13208          }
13209    
13210          public Builder clear() {
13211            super.clear();
13212            return this;
13213          }
13214    
13215          public Builder clone() {
13216            return create().mergeFrom(buildPartial());
13217          }
13218    
13219          public com.google.protobuf.Descriptors.Descriptor
13220              getDescriptorForType() {
13221            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_DoUpgradeResponseProto_descriptor;
13222          }
13223    
13224          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeResponseProto getDefaultInstanceForType() {
13225            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeResponseProto.getDefaultInstance();
13226          }
13227    
13228          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeResponseProto build() {
13229            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeResponseProto result = buildPartial();
13230            if (!result.isInitialized()) {
13231              throw newUninitializedMessageException(result);
13232            }
13233            return result;
13234          }
13235    
13236          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeResponseProto buildPartial() {
13237            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeResponseProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeResponseProto(this);
13238            onBuilt();
13239            return result;
13240          }
13241    
13242          public Builder mergeFrom(com.google.protobuf.Message other) {
13243            if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeResponseProto) {
13244              return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeResponseProto)other);
13245            } else {
13246              super.mergeFrom(other);
13247              return this;
13248            }
13249          }
13250    
13251          public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeResponseProto other) {
13252            if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeResponseProto.getDefaultInstance()) return this;
13253            this.mergeUnknownFields(other.getUnknownFields());
13254            return this;
13255          }
13256    
13257          public final boolean isInitialized() {
13258            return true;
13259          }
13260    
13261          public Builder mergeFrom(
13262              com.google.protobuf.CodedInputStream input,
13263              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
13264              throws java.io.IOException {
13265            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeResponseProto parsedMessage = null;
13266            try {
13267              parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
13268            } catch (com.google.protobuf.InvalidProtocolBufferException e) {
13269              parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeResponseProto) e.getUnfinishedMessage();
13270              throw e;
13271            } finally {
13272              if (parsedMessage != null) {
13273                mergeFrom(parsedMessage);
13274              }
13275            }
13276            return this;
13277          }
13278    
13279          // @@protoc_insertion_point(builder_scope:hadoop.hdfs.qjournal.DoUpgradeResponseProto)
13280        }
13281    
13282        static {
13283          defaultInstance = new DoUpgradeResponseProto(true);
13284          defaultInstance.initFields();
13285        }
13286    
13287        // @@protoc_insertion_point(class_scope:hadoop.hdfs.qjournal.DoUpgradeResponseProto)
13288      }
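           // Illustrative sketch (editorial, not protoc output): since
           // DoUpgradeResponseProto declares no fields, building and
           // round-tripping it is trivial, e.g. (hypothetical variable names):
           //
           //   DoUpgradeResponseProto resp =
           //       DoUpgradeResponseProto.newBuilder().build();
           //   byte[] wire = resp.toByteArray();  // zero-length payload
           //   DoUpgradeResponseProto back = DoUpgradeResponseProto.parseFrom(wire);
           //   assert back.equals(resp);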
13289    
13290      public interface DoFinalizeRequestProtoOrBuilder
13291          extends com.google.protobuf.MessageOrBuilder {
13292    
13293        // required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;
13294        /**
13295         * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
13296         */
13297        boolean hasJid();
13298        /**
13299         * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
13300         */
13301        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid();
13302        /**
13303         * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
13304         */
13305        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder();
13306      }
13307      /**
13308       * Protobuf type {@code hadoop.hdfs.qjournal.DoFinalizeRequestProto}
13309       *
13310       * <pre>
13311       *
13312       * doFinalize()
13313       * </pre>
13314       */
13315      public static final class DoFinalizeRequestProto extends
13316          com.google.protobuf.GeneratedMessage
13317          implements DoFinalizeRequestProtoOrBuilder {
13318        // Use DoFinalizeRequestProto.newBuilder() to construct.
13319        private DoFinalizeRequestProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
13320          super(builder);
13321          this.unknownFields = builder.getUnknownFields();
13322        }
13323        private DoFinalizeRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
13324    
13325        private static final DoFinalizeRequestProto defaultInstance;
13326        public static DoFinalizeRequestProto getDefaultInstance() {
13327          return defaultInstance;
13328        }
13329    
13330        public DoFinalizeRequestProto getDefaultInstanceForType() {
13331          return defaultInstance;
13332        }
13333    
13334        private final com.google.protobuf.UnknownFieldSet unknownFields;
13335        @java.lang.Override
13336        public final com.google.protobuf.UnknownFieldSet
13337            getUnknownFields() {
13338          return this.unknownFields;
13339        }
13340        private DoFinalizeRequestProto(
13341            com.google.protobuf.CodedInputStream input,
13342            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
13343            throws com.google.protobuf.InvalidProtocolBufferException {
13344          initFields();
13345          int mutable_bitField0_ = 0;
13346          com.google.protobuf.UnknownFieldSet.Builder unknownFields =
13347              com.google.protobuf.UnknownFieldSet.newBuilder();
13348          try {
13349            boolean done = false;
13350            while (!done) {
13351              int tag = input.readTag();
13352              switch (tag) {
13353                case 0:
13354                  done = true;
13355                  break;
13356                default: {
13357                  if (!parseUnknownField(input, unknownFields,
13358                                         extensionRegistry, tag)) {
13359                    done = true;
13360                  }
13361                  break;
13362                }
13363                case 10: {
13364                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder subBuilder = null;
13365                  if (((bitField0_ & 0x00000001) == 0x00000001)) {
13366                    subBuilder = jid_.toBuilder();
13367                  }
13368                  jid_ = input.readMessage(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.PARSER, extensionRegistry);
13369                  if (subBuilder != null) {
13370                    subBuilder.mergeFrom(jid_);
13371                    jid_ = subBuilder.buildPartial();
13372                  }
13373                  bitField0_ |= 0x00000001;
13374                  break;
13375                }
13376              }
13377            }
13378          } catch (com.google.protobuf.InvalidProtocolBufferException e) {
13379            throw e.setUnfinishedMessage(this);
13380          } catch (java.io.IOException e) {
13381            throw new com.google.protobuf.InvalidProtocolBufferException(
13382                e.getMessage()).setUnfinishedMessage(this);
13383          } finally {
13384            this.unknownFields = unknownFields.build();
13385            makeExtensionsImmutable();
13386          }
13387        }
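             // Editorial note (not protoc output): in the parsing loop above,
             // tag 10 is field number 1 with wire type 2 ((1 << 3) | 2), i.e.
             // the length-delimited encoding of the embedded JournalIdProto.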
13388        public static final com.google.protobuf.Descriptors.Descriptor
13389            getDescriptor() {
13390          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_DoFinalizeRequestProto_descriptor;
13391        }
13392    
13393        protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
13394            internalGetFieldAccessorTable() {
13395          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_DoFinalizeRequestProto_fieldAccessorTable
13396              .ensureFieldAccessorsInitialized(
13397                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeRequestProto.Builder.class);
13398        }
13399    
13400        public static com.google.protobuf.Parser<DoFinalizeRequestProto> PARSER =
13401            new com.google.protobuf.AbstractParser<DoFinalizeRequestProto>() {
13402          public DoFinalizeRequestProto parsePartialFrom(
13403              com.google.protobuf.CodedInputStream input,
13404              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
13405              throws com.google.protobuf.InvalidProtocolBufferException {
13406            return new DoFinalizeRequestProto(input, extensionRegistry);
13407          }
13408        };
13409    
13410        @java.lang.Override
13411        public com.google.protobuf.Parser<DoFinalizeRequestProto> getParserForType() {
13412          return PARSER;
13413        }
13414    
13415        private int bitField0_;
13416        // required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;
13417        public static final int JID_FIELD_NUMBER = 1;
13418        private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto jid_;
13419        /**
13420         * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
13421         */
13422        public boolean hasJid() {
13423          return ((bitField0_ & 0x00000001) == 0x00000001);
13424        }
13425        /**
13426         * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
13427         */
13428        public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid() {
13429          return jid_;
13430        }
13431        /**
13432         * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
13433         */
13434        public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder() {
13435          return jid_;
13436        }
13437    
13438        private void initFields() {
13439          jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
13440        }
13441        private byte memoizedIsInitialized = -1;
13442        public final boolean isInitialized() {
13443          byte isInitialized = memoizedIsInitialized;
13444          if (isInitialized != -1) return isInitialized == 1;
13445    
13446          if (!hasJid()) {
13447            memoizedIsInitialized = 0;
13448            return false;
13449          }
13450          if (!getJid().isInitialized()) {
13451            memoizedIsInitialized = 0;
13452            return false;
13453          }
13454          memoizedIsInitialized = 1;
13455          return true;
13456        }
13457    
13458        public void writeTo(com.google.protobuf.CodedOutputStream output)
13459                            throws java.io.IOException {
13460          getSerializedSize();
13461          if (((bitField0_ & 0x00000001) == 0x00000001)) {
13462            output.writeMessage(1, jid_);
13463          }
13464          getUnknownFields().writeTo(output);
13465        }
13466    
13467        private int memoizedSerializedSize = -1;
13468        public int getSerializedSize() {
13469          int size = memoizedSerializedSize;
13470          if (size != -1) return size;
13471    
13472          size = 0;
13473          if (((bitField0_ & 0x00000001) == 0x00000001)) {
13474            size += com.google.protobuf.CodedOutputStream
13475              .computeMessageSize(1, jid_);
13476          }
13477          size += getUnknownFields().getSerializedSize();
13478          memoizedSerializedSize = size;
13479          return size;
13480        }
13481    
13482        private static final long serialVersionUID = 0L;
13483        @java.lang.Override
13484        protected java.lang.Object writeReplace()
13485            throws java.io.ObjectStreamException {
13486          return super.writeReplace();
13487        }
13488    
13489        @java.lang.Override
13490        public boolean equals(final java.lang.Object obj) {
13491          if (obj == this) {
13492            return true;
13493          }
13494          if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeRequestProto)) {
13495            return super.equals(obj);
13496          }
13497          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeRequestProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeRequestProto) obj;
13498    
13499          boolean result = true;
13500          result = result && (hasJid() == other.hasJid());
13501          if (hasJid()) {
13502            result = result && getJid()
13503                .equals(other.getJid());
13504          }
13505          result = result &&
13506              getUnknownFields().equals(other.getUnknownFields());
13507          return result;
13508        }
13509    
13510        private int memoizedHashCode = 0;
13511        @java.lang.Override
13512        public int hashCode() {
13513          if (memoizedHashCode != 0) {
13514            return memoizedHashCode;
13515          }
13516          int hash = 41;
13517          hash = (19 * hash) + getDescriptorForType().hashCode();
13518          if (hasJid()) {
13519            hash = (37 * hash) + JID_FIELD_NUMBER;
13520            hash = (53 * hash) + getJid().hashCode();
13521          }
13522          hash = (29 * hash) + getUnknownFields().hashCode();
13523          memoizedHashCode = hash;
13524          return hash;
13525        }
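             // Editorial note (not protoc output): 0 doubles as the "not yet
             // computed" sentinel for memoizedHashCode, and JID_FIELD_NUMBER is
             // mixed in so the same value in a different field hashes differently.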
13526    
13527        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeRequestProto parseFrom(
13528            com.google.protobuf.ByteString data)
13529            throws com.google.protobuf.InvalidProtocolBufferException {
13530          return PARSER.parseFrom(data);
13531        }
13532        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeRequestProto parseFrom(
13533            com.google.protobuf.ByteString data,
13534            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
13535            throws com.google.protobuf.InvalidProtocolBufferException {
13536          return PARSER.parseFrom(data, extensionRegistry);
13537        }
13538        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeRequestProto parseFrom(byte[] data)
13539            throws com.google.protobuf.InvalidProtocolBufferException {
13540          return PARSER.parseFrom(data);
13541        }
13542        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeRequestProto parseFrom(
13543            byte[] data,
13544            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
13545            throws com.google.protobuf.InvalidProtocolBufferException {
13546          return PARSER.parseFrom(data, extensionRegistry);
13547        }
13548        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeRequestProto parseFrom(java.io.InputStream input)
13549            throws java.io.IOException {
13550          return PARSER.parseFrom(input);
13551        }
13552        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeRequestProto parseFrom(
13553            java.io.InputStream input,
13554            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
13555            throws java.io.IOException {
13556          return PARSER.parseFrom(input, extensionRegistry);
13557        }
13558        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeRequestProto parseDelimitedFrom(java.io.InputStream input)
13559            throws java.io.IOException {
13560          return PARSER.parseDelimitedFrom(input);
13561        }
13562        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeRequestProto parseDelimitedFrom(
13563            java.io.InputStream input,
13564            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
13565            throws java.io.IOException {
13566          return PARSER.parseDelimitedFrom(input, extensionRegistry);
13567        }
13568        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeRequestProto parseFrom(
13569            com.google.protobuf.CodedInputStream input)
13570            throws java.io.IOException {
13571          return PARSER.parseFrom(input);
13572        }
13573        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeRequestProto parseFrom(
13574            com.google.protobuf.CodedInputStream input,
13575            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
13576            throws java.io.IOException {
13577          return PARSER.parseFrom(input, extensionRegistry);
13578        }
13579    
13580        public static Builder newBuilder() { return Builder.create(); }
13581        public Builder newBuilderForType() { return newBuilder(); }
13582        public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeRequestProto prototype) {
13583          return newBuilder().mergeFrom(prototype);
13584        }
13585        public Builder toBuilder() { return newBuilder(this); }
13586    
13587        @java.lang.Override
13588        protected Builder newBuilderForType(
13589            com.google.protobuf.GeneratedMessage.BuilderParent parent) {
13590          Builder builder = new Builder(parent);
13591          return builder;
13592        }
13593        /**
13594         * Protobuf type {@code hadoop.hdfs.qjournal.DoFinalizeRequestProto}
13595         *
13596         * <pre>
13597         *
13598         * doFinalize()
13599         * </pre>
13600         */
13601        public static final class Builder extends
13602            com.google.protobuf.GeneratedMessage.Builder<Builder>
13603           implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeRequestProtoOrBuilder {
13604          public static final com.google.protobuf.Descriptors.Descriptor
13605              getDescriptor() {
13606            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_DoFinalizeRequestProto_descriptor;
13607          }
13608    
13609          protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
13610              internalGetFieldAccessorTable() {
13611            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_DoFinalizeRequestProto_fieldAccessorTable
13612                .ensureFieldAccessorsInitialized(
13613                    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeRequestProto.Builder.class);
13614          }
13615    
13616          // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeRequestProto.newBuilder()
13617          private Builder() {
13618            maybeForceBuilderInitialization();
13619          }
13620    
13621          private Builder(
13622              com.google.protobuf.GeneratedMessage.BuilderParent parent) {
13623            super(parent);
13624            maybeForceBuilderInitialization();
13625          }
13626          private void maybeForceBuilderInitialization() {
13627            if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
13628              getJidFieldBuilder();
13629            }
13630          }
13631          private static Builder create() {
13632            return new Builder();
13633          }
13634    
13635          public Builder clear() {
13636            super.clear();
13637            if (jidBuilder_ == null) {
13638              jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
13639            } else {
13640              jidBuilder_.clear();
13641            }
13642            bitField0_ = (bitField0_ & ~0x00000001);
13643            return this;
13644          }
13645    
13646          public Builder clone() {
13647            return create().mergeFrom(buildPartial());
13648          }
13649    
13650          public com.google.protobuf.Descriptors.Descriptor
13651              getDescriptorForType() {
13652            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_DoFinalizeRequestProto_descriptor;
13653          }
13654    
13655          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeRequestProto getDefaultInstanceForType() {
13656            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeRequestProto.getDefaultInstance();
13657          }
13658    
13659          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeRequestProto build() {
13660            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeRequestProto result = buildPartial();
13661            if (!result.isInitialized()) {
13662              throw newUninitializedMessageException(result);
13663            }
13664            return result;
13665          }
13666    
13667          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeRequestProto buildPartial() {
13668            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeRequestProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeRequestProto(this);
13669            int from_bitField0_ = bitField0_;
13670            int to_bitField0_ = 0;
13671            if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
13672              to_bitField0_ |= 0x00000001;
13673            }
13674            if (jidBuilder_ == null) {
13675              result.jid_ = jid_;
13676            } else {
13677              result.jid_ = jidBuilder_.build();
13678            }
13679            result.bitField0_ = to_bitField0_;
13680            onBuilt();
13681            return result;
13682          }
13683    
13684          public Builder mergeFrom(com.google.protobuf.Message other) {
13685            if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeRequestProto) {
13686              return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeRequestProto)other);
13687            } else {
13688              super.mergeFrom(other);
13689              return this;
13690            }
13691          }
13692    
13693          public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeRequestProto other) {
13694            if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeRequestProto.getDefaultInstance()) return this;
13695            if (other.hasJid()) {
13696              mergeJid(other.getJid());
13697            }
13698            this.mergeUnknownFields(other.getUnknownFields());
13699            return this;
13700          }
13701    
13702          public final boolean isInitialized() {
13703            if (!hasJid()) {
13704              
13705              return false;
13706            }
13707            if (!getJid().isInitialized()) {
13708              
13709              return false;
13710            }
13711            return true;
13712          }
13713    
13714          public Builder mergeFrom(
13715              com.google.protobuf.CodedInputStream input,
13716              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
13717              throws java.io.IOException {
13718            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeRequestProto parsedMessage = null;
13719            try {
13720              parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
13721            } catch (com.google.protobuf.InvalidProtocolBufferException e) {
13722              parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeRequestProto) e.getUnfinishedMessage();
13723              throw e;
13724            } finally {
13725              if (parsedMessage != null) {
13726                mergeFrom(parsedMessage);
13727              }
13728            }
13729            return this;
13730          }
13731          private int bitField0_;
13732    
13733          // required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;
13734          private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
13735          private com.google.protobuf.SingleFieldBuilder<
13736              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder> jidBuilder_;
13737          /**
13738           * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
13739           */
13740          public boolean hasJid() {
13741            return ((bitField0_ & 0x00000001) == 0x00000001);
13742          }
13743          /**
13744           * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
13745           */
13746          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid() {
13747            if (jidBuilder_ == null) {
13748              return jid_;
13749            } else {
13750              return jidBuilder_.getMessage();
13751            }
13752          }
13753          /**
13754           * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
13755           */
13756          public Builder setJid(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto value) {
13757            if (jidBuilder_ == null) {
13758              if (value == null) {
13759                throw new NullPointerException();
13760              }
13761              jid_ = value;
13762              onChanged();
13763            } else {
13764              jidBuilder_.setMessage(value);
13765            }
13766            bitField0_ |= 0x00000001;
13767            return this;
13768          }
13769          /**
13770           * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
13771           */
13772          public Builder setJid(
13773              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder builderForValue) {
13774            if (jidBuilder_ == null) {
13775              jid_ = builderForValue.build();
13776              onChanged();
13777            } else {
13778              jidBuilder_.setMessage(builderForValue.build());
13779            }
13780            bitField0_ |= 0x00000001;
13781            return this;
13782          }
13783          /**
13784           * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
13785           */
13786          public Builder mergeJid(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto value) {
13787            if (jidBuilder_ == null) {
13788              if (((bitField0_ & 0x00000001) == 0x00000001) &&
13789                  jid_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance()) {
13790                jid_ =
13791                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.newBuilder(jid_).mergeFrom(value).buildPartial();
13792              } else {
13793                jid_ = value;
13794              }
13795              onChanged();
13796            } else {
13797              jidBuilder_.mergeFrom(value);
13798            }
13799            bitField0_ |= 0x00000001;
13800            return this;
13801          }
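               // Editorial note (illustrative, not protoc output): mergeJid()
               // merges field-by-field rather than replacing. JournalIdProto's
               // only field is a singular string, so a set identifier in the
               // incoming value wins, e.g. (hypothetical values):
               //
               //   builder.setJid(JournalIdProto.newBuilder().setIdentifier("a"));
               //   builder.mergeJid(
               //       JournalIdProto.newBuilder().setIdentifier("b").build());
               //   // builder.getJid().getIdentifier() is now "b"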
13802          /**
13803           * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
13804           */
13805          public Builder clearJid() {
13806            if (jidBuilder_ == null) {
13807              jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
13808              onChanged();
13809            } else {
13810              jidBuilder_.clear();
13811            }
13812            bitField0_ = (bitField0_ & ~0x00000001);
13813            return this;
13814          }
13815          /**
13816           * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
13817           */
13818          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder getJidBuilder() {
13819            bitField0_ |= 0x00000001;
13820            onChanged();
13821            return getJidFieldBuilder().getBuilder();
13822          }
13823          /**
13824           * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
13825           */
13826          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder() {
13827            if (jidBuilder_ != null) {
13828              return jidBuilder_.getMessageOrBuilder();
13829            } else {
13830              return jid_;
13831            }
13832          }
13833          /**
13834           * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
13835           */
13836          private com.google.protobuf.SingleFieldBuilder<
13837              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder> 
13838              getJidFieldBuilder() {
13839            if (jidBuilder_ == null) {
13840              jidBuilder_ = new com.google.protobuf.SingleFieldBuilder<
13841                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder>(
13842                      jid_,
13843                      getParentForChildren(),
13844                      isClean());
13845              jid_ = null;
13846            }
13847            return jidBuilder_;
13848          }
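               // Editorial note (not protoc output): the SingleFieldBuilder is
               // created lazily on first access; afterwards jid_ is nulled and
               // all reads and writes go through the field builder, which
               // notifies parent builders of changes.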
13849    
13850          // @@protoc_insertion_point(builder_scope:hadoop.hdfs.qjournal.DoFinalizeRequestProto)
13851        }
13852    
13853        static {
13854          defaultInstance = new DoFinalizeRequestProto(true);
13855          defaultInstance.initFields();
13856        }
13857    
13858        // @@protoc_insertion_point(class_scope:hadoop.hdfs.qjournal.DoFinalizeRequestProto)
13859      }
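           // Illustrative sketch (editorial, not protoc output): jid is a
           // required field, so build() throws UninitializedMessageException if
           // it was never set, while buildPartial() does not check. E.g.
           // (hypothetical journal id):
           //
           //   DoFinalizeRequestProto req = DoFinalizeRequestProto.newBuilder()
           //       .setJid(JournalIdProto.newBuilder().setIdentifier("edits-1"))
           //       .build();            // ok: the required field is set
           //   // DoFinalizeRequestProto.newBuilder().build();  // would throw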
13860    
13861      public interface DoFinalizeResponseProtoOrBuilder
13862          extends com.google.protobuf.MessageOrBuilder {
13863      }
13864      /**
13865       * Protobuf type {@code hadoop.hdfs.qjournal.DoFinalizeResponseProto}
13866       */
13867      public static final class DoFinalizeResponseProto extends
13868          com.google.protobuf.GeneratedMessage
13869          implements DoFinalizeResponseProtoOrBuilder {
13870        // Use DoFinalizeResponseProto.newBuilder() to construct.
13871        private DoFinalizeResponseProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
13872          super(builder);
13873          this.unknownFields = builder.getUnknownFields();
13874        }
13875        private DoFinalizeResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
13876    
13877        private static final DoFinalizeResponseProto defaultInstance;
13878        public static DoFinalizeResponseProto getDefaultInstance() {
13879          return defaultInstance;
13880        }
13881    
13882        public DoFinalizeResponseProto getDefaultInstanceForType() {
13883          return defaultInstance;
13884        }
13885    
13886        private final com.google.protobuf.UnknownFieldSet unknownFields;
13887        @java.lang.Override
13888        public final com.google.protobuf.UnknownFieldSet
13889            getUnknownFields() {
13890          return this.unknownFields;
13891        }
13892        private DoFinalizeResponseProto(
13893            com.google.protobuf.CodedInputStream input,
13894            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
13895            throws com.google.protobuf.InvalidProtocolBufferException {
13896          initFields();
13897          com.google.protobuf.UnknownFieldSet.Builder unknownFields =
13898              com.google.protobuf.UnknownFieldSet.newBuilder();
13899          try {
13900            boolean done = false;
13901            while (!done) {
13902              int tag = input.readTag();
13903              switch (tag) {
13904                case 0:
13905                  done = true;
13906                  break;
13907                default: {
13908                  if (!parseUnknownField(input, unknownFields,
13909                                         extensionRegistry, tag)) {
13910                    done = true;
13911                  }
13912                  break;
13913                }
13914              }
13915            }
13916          } catch (com.google.protobuf.InvalidProtocolBufferException e) {
13917            throw e.setUnfinishedMessage(this);
13918          } catch (java.io.IOException e) {
13919            throw new com.google.protobuf.InvalidProtocolBufferException(
13920                e.getMessage()).setUnfinishedMessage(this);
13921          } finally {
13922            this.unknownFields = unknownFields.build();
13923            makeExtensionsImmutable();
13924          }
13925        }
13926        public static final com.google.protobuf.Descriptors.Descriptor
13927            getDescriptor() {
13928          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_DoFinalizeResponseProto_descriptor;
13929        }
13930    
13931        protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
13932            internalGetFieldAccessorTable() {
13933          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_DoFinalizeResponseProto_fieldAccessorTable
13934              .ensureFieldAccessorsInitialized(
13935                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeResponseProto.Builder.class);
13936        }
13937    
13938        public static com.google.protobuf.Parser<DoFinalizeResponseProto> PARSER =
13939            new com.google.protobuf.AbstractParser<DoFinalizeResponseProto>() {
13940          public DoFinalizeResponseProto parsePartialFrom(
13941              com.google.protobuf.CodedInputStream input,
13942              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
13943              throws com.google.protobuf.InvalidProtocolBufferException {
13944            return new DoFinalizeResponseProto(input, extensionRegistry);
13945          }
13946        };
13947    
13948        @java.lang.Override
13949        public com.google.protobuf.Parser<DoFinalizeResponseProto> getParserForType() {
13950          return PARSER;
13951        }
13952    
13953        private void initFields() {
13954        }
13955        private byte memoizedIsInitialized = -1;
13956        public final boolean isInitialized() {
13957          byte isInitialized = memoizedIsInitialized;
13958          if (isInitialized != -1) return isInitialized == 1;
13959    
13960          memoizedIsInitialized = 1;
13961          return true;
13962        }
13963    
13964        public void writeTo(com.google.protobuf.CodedOutputStream output)
13965                            throws java.io.IOException {
13966          getSerializedSize();
13967          getUnknownFields().writeTo(output);
13968        }
13969    
13970        private int memoizedSerializedSize = -1;
13971        public int getSerializedSize() {
13972          int size = memoizedSerializedSize;
13973          if (size != -1) return size;
13974    
13975          size = 0;
13976          size += getUnknownFields().getSerializedSize();
13977          memoizedSerializedSize = size;
13978          return size;
13979        }
13980    
13981        private static final long serialVersionUID = 0L;
13982        @java.lang.Override
13983        protected java.lang.Object writeReplace()
13984            throws java.io.ObjectStreamException {
13985          return super.writeReplace();
13986        }
13987    
13988        @java.lang.Override
13989        public boolean equals(final java.lang.Object obj) {
13990          if (obj == this) {
13991            return true;
13992          }
13993          if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeResponseProto)) {
13994            return super.equals(obj);
13995          }
13996          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeResponseProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeResponseProto) obj;
13997    
13998          boolean result = true;
13999          result = result &&
14000              getUnknownFields().equals(other.getUnknownFields());
14001          return result;
14002        }
14003    
14004        private int memoizedHashCode = 0;
14005        @java.lang.Override
14006        public int hashCode() {
14007          if (memoizedHashCode != 0) {
14008            return memoizedHashCode;
14009          }
14010          int hash = 41;
14011          hash = (19 * hash) + getDescriptorForType().hashCode();
14012          hash = (29 * hash) + getUnknownFields().hashCode();
14013          memoizedHashCode = hash;
14014          return hash;
14015        }
14016    
14017        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeResponseProto parseFrom(
14018            com.google.protobuf.ByteString data)
14019            throws com.google.protobuf.InvalidProtocolBufferException {
14020          return PARSER.parseFrom(data);
14021        }
14022        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeResponseProto parseFrom(
14023            com.google.protobuf.ByteString data,
14024            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
14025            throws com.google.protobuf.InvalidProtocolBufferException {
14026          return PARSER.parseFrom(data, extensionRegistry);
14027        }
14028        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeResponseProto parseFrom(byte[] data)
14029            throws com.google.protobuf.InvalidProtocolBufferException {
14030          return PARSER.parseFrom(data);
14031        }
14032        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeResponseProto parseFrom(
14033            byte[] data,
14034            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
14035            throws com.google.protobuf.InvalidProtocolBufferException {
14036          return PARSER.parseFrom(data, extensionRegistry);
14037        }
14038        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeResponseProto parseFrom(java.io.InputStream input)
14039            throws java.io.IOException {
14040          return PARSER.parseFrom(input);
14041        }
14042        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeResponseProto parseFrom(
14043            java.io.InputStream input,
14044            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
14045            throws java.io.IOException {
14046          return PARSER.parseFrom(input, extensionRegistry);
14047        }
14048        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeResponseProto parseDelimitedFrom(java.io.InputStream input)
14049            throws java.io.IOException {
14050          return PARSER.parseDelimitedFrom(input);
14051        }
14052        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeResponseProto parseDelimitedFrom(
14053            java.io.InputStream input,
14054            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
14055            throws java.io.IOException {
14056          return PARSER.parseDelimitedFrom(input, extensionRegistry);
14057        }
14058        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeResponseProto parseFrom(
14059            com.google.protobuf.CodedInputStream input)
14060            throws java.io.IOException {
14061          return PARSER.parseFrom(input);
14062        }
14063        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeResponseProto parseFrom(
14064            com.google.protobuf.CodedInputStream input,
14065            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
14066            throws java.io.IOException {
14067          return PARSER.parseFrom(input, extensionRegistry);
14068        }
14069    
14070        public static Builder newBuilder() { return Builder.create(); }
14071        public Builder newBuilderForType() { return newBuilder(); }
14072        public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeResponseProto prototype) {
14073          return newBuilder().mergeFrom(prototype);
14074        }
14075        public Builder toBuilder() { return newBuilder(this); }
14076    
14077        @java.lang.Override
14078        protected Builder newBuilderForType(
14079            com.google.protobuf.GeneratedMessage.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * Protobuf type {@code hadoop.hdfs.qjournal.DoFinalizeResponseProto}
     */
    public static final class Builder extends
        com.google.protobuf.GeneratedMessage.Builder<Builder>
        implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeResponseProtoOrBuilder {
      public static final com.google.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_DoFinalizeResponseProto_descriptor;
      }

      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_DoFinalizeResponseProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeResponseProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeResponseProto.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }

      private Builder(
          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      private void maybeForceBuilderInitialization() {
        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
        }
      }
      private static Builder create() {
        return new Builder();
      }

      public Builder clear() {
        super.clear();
        return this;
      }

      public Builder clone() {
        return create().mergeFrom(buildPartial());
      }

      public com.google.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_DoFinalizeResponseProto_descriptor;
      }

      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeResponseProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeResponseProto.getDefaultInstance();
      }

      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeResponseProto build() {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeResponseProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeResponseProto buildPartial() {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeResponseProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeResponseProto(this);
        onBuilt();
        return result;
      }

      public Builder mergeFrom(com.google.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeResponseProto) {
          return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeResponseProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeResponseProto other) {
        if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeResponseProto.getDefaultInstance()) return this;
        this.mergeUnknownFields(other.getUnknownFields());
        return this;
      }

      public final boolean isInitialized() {
        return true;
      }

      public Builder mergeFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeResponseProto parsedMessage = null;
        try {
          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
          parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeResponseProto) e.getUnfinishedMessage();
          throw e;
        } finally {
          if (parsedMessage != null) {
            mergeFrom(parsedMessage);
          }
        }
        return this;
      }

      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.qjournal.DoFinalizeResponseProto)
    }

    static {
      defaultInstance = new DoFinalizeResponseProto(true);
      defaultInstance.initFields();
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.qjournal.DoFinalizeResponseProto)
  }
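
  // DoFinalizeResponseProto above carries no fields, so callers normally just
  // reuse the shared default instance rather than building a new object. A
  // minimal sketch using only the generated API:
  //
  //   DoFinalizeResponseProto resp =
  //       DoFinalizeResponseProto.getDefaultInstance();
  //   // Equivalent, at the cost of one extra allocation:
  //   DoFinalizeResponseProto also =
  //       DoFinalizeResponseProto.newBuilder().build();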

  public interface CanRollBackRequestProtoOrBuilder
      extends com.google.protobuf.MessageOrBuilder {

    // required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;
    /**
     * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
     */
    boolean hasJid();
    /**
     * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
     */
    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid();
    /**
     * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
     */
    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder();

    // required .hadoop.hdfs.StorageInfoProto storage = 2;
    /**
     * <code>required .hadoop.hdfs.StorageInfoProto storage = 2;</code>
     */
    boolean hasStorage();
    /**
     * <code>required .hadoop.hdfs.StorageInfoProto storage = 2;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto getStorage();
    /**
     * <code>required .hadoop.hdfs.StorageInfoProto storage = 2;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProtoOrBuilder getStorageOrBuilder();

    // required .hadoop.hdfs.StorageInfoProto prevStorage = 3;
    /**
     * <code>required .hadoop.hdfs.StorageInfoProto prevStorage = 3;</code>
     */
    boolean hasPrevStorage();
    /**
     * <code>required .hadoop.hdfs.StorageInfoProto prevStorage = 3;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto getPrevStorage();
    /**
     * <code>required .hadoop.hdfs.StorageInfoProto prevStorage = 3;</code>
     */
    org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProtoOrBuilder getPrevStorageOrBuilder();

    // required int32 targetLayoutVersion = 4;
    /**
     * <code>required int32 targetLayoutVersion = 4;</code>
     */
    boolean hasTargetLayoutVersion();
    /**
     * <code>required int32 targetLayoutVersion = 4;</code>
     */
    int getTargetLayoutVersion();
  }
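
  // The *OrBuilder interface above is a read-only view implemented by both the
  // immutable CanRollBackRequestProto message and its Builder, so code that
  // only inspects fields can accept either form. A small sketch (the helper
  // name is illustrative, not part of this generated file):
  //
  //   static String describeJid(CanRollBackRequestProtoOrBuilder req) {
  //     return req.hasJid() ? req.getJid().getIdentifier() : "<no jid>";
  //   }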
  /**
   * Protobuf type {@code hadoop.hdfs.qjournal.CanRollBackRequestProto}
   *
   * <pre>
   **
   * canRollBack()
   * </pre>
   */
  public static final class CanRollBackRequestProto extends
      com.google.protobuf.GeneratedMessage
      implements CanRollBackRequestProtoOrBuilder {
    // Use CanRollBackRequestProto.newBuilder() to construct.
    private CanRollBackRequestProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
      super(builder);
      this.unknownFields = builder.getUnknownFields();
    }
    private CanRollBackRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }

    private static final CanRollBackRequestProto defaultInstance;
    public static CanRollBackRequestProto getDefaultInstance() {
      return defaultInstance;
    }

    public CanRollBackRequestProto getDefaultInstanceForType() {
      return defaultInstance;
    }

    private final com.google.protobuf.UnknownFieldSet unknownFields;
    @java.lang.Override
    public final com.google.protobuf.UnknownFieldSet
        getUnknownFields() {
      return this.unknownFields;
    }
    private CanRollBackRequestProto(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      initFields();
      int mutable_bitField0_ = 0;
      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
          com.google.protobuf.UnknownFieldSet.newBuilder();
      try {
        boolean done = false;
        while (!done) {
          int tag = input.readTag();
          switch (tag) {
            case 0:
              done = true;
              break;
            case 10: {
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder subBuilder = null;
              if (((bitField0_ & 0x00000001) == 0x00000001)) {
                subBuilder = jid_.toBuilder();
              }
              jid_ = input.readMessage(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.PARSER, extensionRegistry);
              if (subBuilder != null) {
                subBuilder.mergeFrom(jid_);
                jid_ = subBuilder.buildPartial();
              }
              bitField0_ |= 0x00000001;
              break;
            }
            case 18: {
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.Builder subBuilder = null;
              if (((bitField0_ & 0x00000002) == 0x00000002)) {
                subBuilder = storage_.toBuilder();
              }
              storage_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.PARSER, extensionRegistry);
              if (subBuilder != null) {
                subBuilder.mergeFrom(storage_);
                storage_ = subBuilder.buildPartial();
              }
              bitField0_ |= 0x00000002;
              break;
            }
            case 26: {
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.Builder subBuilder = null;
              if (((bitField0_ & 0x00000004) == 0x00000004)) {
                subBuilder = prevStorage_.toBuilder();
              }
              prevStorage_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.PARSER, extensionRegistry);
              if (subBuilder != null) {
                subBuilder.mergeFrom(prevStorage_);
                prevStorage_ = subBuilder.buildPartial();
              }
              bitField0_ |= 0x00000004;
              break;
            }
            case 32: {
              bitField0_ |= 0x00000008;
              targetLayoutVersion_ = input.readInt32();
              break;
            }
            default: {
              if (!parseUnknownField(input, unknownFields,
                                     extensionRegistry, tag)) {
                done = true;
              }
              break;
            }
          }
        }
      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
        throw e.setUnfinishedMessage(this);
      } catch (java.io.IOException e) {
        throw new com.google.protobuf.InvalidProtocolBufferException(
            e.getMessage()).setUnfinishedMessage(this);
      } finally {
        this.unknownFields = unknownFields.build();
        makeExtensionsImmutable();
      }
    }
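    // How the tags in the parsing loop above decode: a protobuf tag is
    // (field_number << 3) | wire_type. Hence 10 = (1 << 3) | 2 is field 1
    // (jid, length-delimited), 18 = (2 << 3) | 2 is field 2 (storage),
    // 26 = (3 << 3) | 2 is field 3 (prevStorage), and 32 = (4 << 3) | 0 is
    // field 4 (targetLayoutVersion, a varint). Tag 0 signals end of input;
    // any other tag is preserved in the unknown-field set.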
    public static final com.google.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_CanRollBackRequestProto_descriptor;
    }

    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_CanRollBackRequestProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackRequestProto.Builder.class);
    }

    public static com.google.protobuf.Parser<CanRollBackRequestProto> PARSER =
        new com.google.protobuf.AbstractParser<CanRollBackRequestProto>() {
      public CanRollBackRequestProto parsePartialFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws com.google.protobuf.InvalidProtocolBufferException {
        return new CanRollBackRequestProto(input, extensionRegistry);
      }
    };

    @java.lang.Override
    public com.google.protobuf.Parser<CanRollBackRequestProto> getParserForType() {
      return PARSER;
    }

    private int bitField0_;
    // required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;
    public static final int JID_FIELD_NUMBER = 1;
    private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto jid_;
    /**
     * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
     */
    public boolean hasJid() {
      return ((bitField0_ & 0x00000001) == 0x00000001);
    }
    /**
     * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
     */
    public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid() {
      return jid_;
    }
    /**
     * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
     */
    public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder() {
      return jid_;
    }

    // required .hadoop.hdfs.StorageInfoProto storage = 2;
    public static final int STORAGE_FIELD_NUMBER = 2;
    private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto storage_;
    /**
     * <code>required .hadoop.hdfs.StorageInfoProto storage = 2;</code>
     */
    public boolean hasStorage() {
      return ((bitField0_ & 0x00000002) == 0x00000002);
    }
    /**
     * <code>required .hadoop.hdfs.StorageInfoProto storage = 2;</code>
     */
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto getStorage() {
      return storage_;
    }
    /**
     * <code>required .hadoop.hdfs.StorageInfoProto storage = 2;</code>
     */
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProtoOrBuilder getStorageOrBuilder() {
      return storage_;
    }

    // required .hadoop.hdfs.StorageInfoProto prevStorage = 3;
    public static final int PREVSTORAGE_FIELD_NUMBER = 3;
    private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto prevStorage_;
    /**
     * <code>required .hadoop.hdfs.StorageInfoProto prevStorage = 3;</code>
     */
    public boolean hasPrevStorage() {
      return ((bitField0_ & 0x00000004) == 0x00000004);
    }
    /**
     * <code>required .hadoop.hdfs.StorageInfoProto prevStorage = 3;</code>
     */
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto getPrevStorage() {
      return prevStorage_;
    }
    /**
     * <code>required .hadoop.hdfs.StorageInfoProto prevStorage = 3;</code>
     */
    public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProtoOrBuilder getPrevStorageOrBuilder() {
      return prevStorage_;
    }

    // required int32 targetLayoutVersion = 4;
    public static final int TARGETLAYOUTVERSION_FIELD_NUMBER = 4;
    private int targetLayoutVersion_;
    /**
     * <code>required int32 targetLayoutVersion = 4;</code>
     */
    public boolean hasTargetLayoutVersion() {
      return ((bitField0_ & 0x00000008) == 0x00000008);
    }
    /**
     * <code>required int32 targetLayoutVersion = 4;</code>
     */
    public int getTargetLayoutVersion() {
      return targetLayoutVersion_;
    }

    private void initFields() {
      jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
      storage_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.getDefaultInstance();
      prevStorage_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.getDefaultInstance();
      targetLayoutVersion_ = 0;
    }
    private byte memoizedIsInitialized = -1;
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized != -1) return isInitialized == 1;

      if (!hasJid()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasStorage()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasPrevStorage()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!hasTargetLayoutVersion()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!getJid().isInitialized()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!getStorage().isInitialized()) {
        memoizedIsInitialized = 0;
        return false;
      }
      if (!getPrevStorage().isInitialized()) {
        memoizedIsInitialized = 0;
        return false;
      }
      memoizedIsInitialized = 1;
      return true;
    }

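    // Serialization below writes a field only when its presence bit in
    // bitField0_ is set, so unset fields contribute no bytes on the wire.
    // getSerializedSize() memoizes its result in memoizedSerializedSize,
    // which is safe only because the parsed message is immutable.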
    public void writeTo(com.google.protobuf.CodedOutputStream output)
                        throws java.io.IOException {
      getSerializedSize();
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        output.writeMessage(1, jid_);
      }
      if (((bitField0_ & 0x00000002) == 0x00000002)) {
        output.writeMessage(2, storage_);
      }
      if (((bitField0_ & 0x00000004) == 0x00000004)) {
        output.writeMessage(3, prevStorage_);
      }
      if (((bitField0_ & 0x00000008) == 0x00000008)) {
        output.writeInt32(4, targetLayoutVersion_);
      }
      getUnknownFields().writeTo(output);
    }

    private int memoizedSerializedSize = -1;
    public int getSerializedSize() {
      int size = memoizedSerializedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        size += com.google.protobuf.CodedOutputStream
          .computeMessageSize(1, jid_);
      }
      if (((bitField0_ & 0x00000002) == 0x00000002)) {
        size += com.google.protobuf.CodedOutputStream
          .computeMessageSize(2, storage_);
      }
      if (((bitField0_ & 0x00000004) == 0x00000004)) {
        size += com.google.protobuf.CodedOutputStream
          .computeMessageSize(3, prevStorage_);
      }
      if (((bitField0_ & 0x00000008) == 0x00000008)) {
        size += com.google.protobuf.CodedOutputStream
          .computeInt32Size(4, targetLayoutVersion_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSerializedSize = size;
      return size;
    }

    private static final long serialVersionUID = 0L;
    @java.lang.Override
    protected java.lang.Object writeReplace()
        throws java.io.ObjectStreamException {
      return super.writeReplace();
    }

    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
      if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackRequestProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackRequestProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackRequestProto) obj;

      boolean result = true;
      result = result && (hasJid() == other.hasJid());
      if (hasJid()) {
        result = result && getJid()
            .equals(other.getJid());
      }
      result = result && (hasStorage() == other.hasStorage());
      if (hasStorage()) {
        result = result && getStorage()
            .equals(other.getStorage());
      }
      result = result && (hasPrevStorage() == other.hasPrevStorage());
      if (hasPrevStorage()) {
        result = result && getPrevStorage()
            .equals(other.getPrevStorage());
      }
      result = result && (hasTargetLayoutVersion() == other.hasTargetLayoutVersion());
      if (hasTargetLayoutVersion()) {
        result = result && (getTargetLayoutVersion()
            == other.getTargetLayoutVersion());
      }
      result = result &&
          getUnknownFields().equals(other.getUnknownFields());
      return result;
    }

    private int memoizedHashCode = 0;
    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptorForType().hashCode();
      if (hasJid()) {
        hash = (37 * hash) + JID_FIELD_NUMBER;
        hash = (53 * hash) + getJid().hashCode();
      }
      if (hasStorage()) {
        hash = (37 * hash) + STORAGE_FIELD_NUMBER;
        hash = (53 * hash) + getStorage().hashCode();
      }
      if (hasPrevStorage()) {
        hash = (37 * hash) + PREVSTORAGE_FIELD_NUMBER;
        hash = (53 * hash) + getPrevStorage().hashCode();
      }
      if (hasTargetLayoutVersion()) {
        hash = (37 * hash) + TARGETLAYOUTVERSION_FIELD_NUMBER;
        hash = (53 * hash) + getTargetLayoutVersion();
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

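    // A minimal parse sketch for the static parseFrom() overloads that
    // follow (the input bytes are a placeholder):
    //
    //   byte[] wire = ...;  // e.g. produced by writeTo() or toByteArray()
    //   CanRollBackRequestProto req = CanRollBackRequestProto.parseFrom(wire);
    //
    // parseFrom() throws InvalidProtocolBufferException if the bytes are
    // malformed or if any of the four required fields is missing.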
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackRequestProto parseFrom(
        com.google.protobuf.ByteString data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackRequestProto parseFrom(
        com.google.protobuf.ByteString data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackRequestProto parseFrom(byte[] data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackRequestProto parseFrom(
        byte[] data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackRequestProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackRequestProto parseFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackRequestProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackRequestProto parseDelimitedFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input, extensionRegistry);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackRequestProto parseFrom(
        com.google.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackRequestProto parseFrom(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }

    public static Builder newBuilder() { return Builder.create(); }
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackRequestProto prototype) {
      return newBuilder().mergeFrom(prototype);
    }
    public Builder toBuilder() { return newBuilder(this); }

    @java.lang.Override
    protected Builder newBuilderForType(
        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
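    // A hedged end-to-end sketch of populating this request via the Builder
    // defined below; the identifier, layout version, and the two
    // StorageInfoProto values are placeholders, not values mandated by the
    // protocol:
    //
    //   CanRollBackRequestProto req = CanRollBackRequestProto.newBuilder()
    //       .setJid(JournalIdProto.newBuilder().setIdentifier("myjournal"))
    //       .setStorage(storageInfo)          // a built StorageInfoProto
    //       .setPrevStorage(prevStorageInfo)  // likewise
    //       .setTargetLayoutVersion(-55)
    //       .build();  // build() throws if any required field is unset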
    /**
     * Protobuf type {@code hadoop.hdfs.qjournal.CanRollBackRequestProto}
     *
     * <pre>
     **
     * canRollBack()
     * </pre>
     */
    public static final class Builder extends
        com.google.protobuf.GeneratedMessage.Builder<Builder>
        implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackRequestProtoOrBuilder {
      public static final com.google.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_CanRollBackRequestProto_descriptor;
      }

      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_CanRollBackRequestProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackRequestProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackRequestProto.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }

      private Builder(
          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      private void maybeForceBuilderInitialization() {
        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
          getJidFieldBuilder();
          getStorageFieldBuilder();
          getPrevStorageFieldBuilder();
        }
      }
      private static Builder create() {
        return new Builder();
      }

      public Builder clear() {
        super.clear();
        if (jidBuilder_ == null) {
          jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
        } else {
          jidBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000001);
        if (storageBuilder_ == null) {
          storage_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.getDefaultInstance();
        } else {
          storageBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000002);
        if (prevStorageBuilder_ == null) {
          prevStorage_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.getDefaultInstance();
        } else {
          prevStorageBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000004);
        targetLayoutVersion_ = 0;
        bitField0_ = (bitField0_ & ~0x00000008);
        return this;
      }

      public Builder clone() {
        return create().mergeFrom(buildPartial());
      }

      public com.google.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_CanRollBackRequestProto_descriptor;
      }

      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackRequestProto getDefaultInstanceForType() {
        return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackRequestProto.getDefaultInstance();
      }

      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackRequestProto build() {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackRequestProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackRequestProto buildPartial() {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackRequestProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackRequestProto(this);
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
          to_bitField0_ |= 0x00000001;
        }
        if (jidBuilder_ == null) {
          result.jid_ = jid_;
        } else {
          result.jid_ = jidBuilder_.build();
        }
        if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
          to_bitField0_ |= 0x00000002;
        }
        if (storageBuilder_ == null) {
          result.storage_ = storage_;
        } else {
          result.storage_ = storageBuilder_.build();
        }
        if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
          to_bitField0_ |= 0x00000004;
        }
        if (prevStorageBuilder_ == null) {
          result.prevStorage_ = prevStorage_;
        } else {
          result.prevStorage_ = prevStorageBuilder_.build();
        }
        if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
          to_bitField0_ |= 0x00000008;
        }
        result.targetLayoutVersion_ = targetLayoutVersion_;
        result.bitField0_ = to_bitField0_;
        onBuilt();
        return result;
      }

      public Builder mergeFrom(com.google.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackRequestProto) {
          return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackRequestProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackRequestProto other) {
        if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackRequestProto.getDefaultInstance()) return this;
        if (other.hasJid()) {
          mergeJid(other.getJid());
        }
        if (other.hasStorage()) {
          mergeStorage(other.getStorage());
        }
        if (other.hasPrevStorage()) {
          mergePrevStorage(other.getPrevStorage());
        }
        if (other.hasTargetLayoutVersion()) {
          setTargetLayoutVersion(other.getTargetLayoutVersion());
        }
        this.mergeUnknownFields(other.getUnknownFields());
        return this;
      }

      public final boolean isInitialized() {
        if (!hasJid()) {
          return false;
        }
        if (!hasStorage()) {
          return false;
        }
        if (!hasPrevStorage()) {
          return false;
        }
        if (!hasTargetLayoutVersion()) {
          return false;
        }
        if (!getJid().isInitialized()) {
          return false;
        }
        if (!getStorage().isInitialized()) {
          return false;
        }
        if (!getPrevStorage().isInitialized()) {
          return false;
        }
        return true;
      }

      public Builder mergeFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackRequestProto parsedMessage = null;
        try {
          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
          parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackRequestProto) e.getUnfinishedMessage();
          throw e;
        } finally {
          if (parsedMessage != null) {
            mergeFrom(parsedMessage);
          }
        }
        return this;
      }
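      // bitField0_ below tracks which fields have been set on this Builder;
      // buildPartial() copies it into the message's own bitField0_ so that
      // the hasXxx() accessors reflect exactly what the caller populated.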
      private int bitField0_;

      // required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;
      private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
      private com.google.protobuf.SingleFieldBuilder<
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder> jidBuilder_;
      /**
       * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
       */
      public boolean hasJid() {
        return ((bitField0_ & 0x00000001) == 0x00000001);
      }
      /**
       * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
       */
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid() {
        if (jidBuilder_ == null) {
          return jid_;
        } else {
          return jidBuilder_.getMessage();
        }
      }
      /**
       * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
       */
      public Builder setJid(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto value) {
        if (jidBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          jid_ = value;
          onChanged();
        } else {
          jidBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00000001;
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
       */
      public Builder setJid(
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder builderForValue) {
        if (jidBuilder_ == null) {
          jid_ = builderForValue.build();
          onChanged();
        } else {
          jidBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000001;
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
       */
      public Builder mergeJid(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto value) {
        if (jidBuilder_ == null) {
          if (((bitField0_ & 0x00000001) == 0x00000001) &&
              jid_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance()) {
            jid_ =
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.newBuilder(jid_).mergeFrom(value).buildPartial();
          } else {
            jid_ = value;
          }
          onChanged();
        } else {
          jidBuilder_.mergeFrom(value);
        }
        bitField0_ |= 0x00000001;
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
       */
      public Builder clearJid() {
        if (jidBuilder_ == null) {
          jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
          onChanged();
        } else {
          jidBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000001);
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
       */
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder getJidBuilder() {
        bitField0_ |= 0x00000001;
        onChanged();
        return getJidFieldBuilder().getBuilder();
      }
      /**
       * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
       */
      public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder() {
        if (jidBuilder_ != null) {
          return jidBuilder_.getMessageOrBuilder();
        } else {
          return jid_;
        }
      }
      /**
       * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
       */
      private com.google.protobuf.SingleFieldBuilder<
          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder>
          getJidFieldBuilder() {
        if (jidBuilder_ == null) {
          jidBuilder_ = new com.google.protobuf.SingleFieldBuilder<
              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder>(
                  jid_,
                  getParentForChildren(),
                  isClean());
          jid_ = null;
        }
        return jidBuilder_;
      }
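      // Note the handoff in getJidFieldBuilder() above: once the lazy
      // SingleFieldBuilder exists, jid_ is nulled and the nested builder
      // becomes the single source of truth, which is why every jid accessor
      // first checks whether jidBuilder_ is null. The storage and prevStorage
      // fields below follow the same pattern.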

      // required .hadoop.hdfs.StorageInfoProto storage = 2;
      private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto storage_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.getDefaultInstance();
      private com.google.protobuf.SingleFieldBuilder<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProtoOrBuilder> storageBuilder_;
      /**
       * <code>required .hadoop.hdfs.StorageInfoProto storage = 2;</code>
       */
      public boolean hasStorage() {
        return ((bitField0_ & 0x00000002) == 0x00000002);
      }
      /**
       * <code>required .hadoop.hdfs.StorageInfoProto storage = 2;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto getStorage() {
        if (storageBuilder_ == null) {
          return storage_;
        } else {
          return storageBuilder_.getMessage();
        }
      }
      /**
       * <code>required .hadoop.hdfs.StorageInfoProto storage = 2;</code>
       */
      public Builder setStorage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto value) {
        if (storageBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          storage_ = value;
          onChanged();
        } else {
          storageBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00000002;
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.StorageInfoProto storage = 2;</code>
       */
      public Builder setStorage(
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.Builder builderForValue) {
        if (storageBuilder_ == null) {
          storage_ = builderForValue.build();
          onChanged();
        } else {
          storageBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000002;
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.StorageInfoProto storage = 2;</code>
       */
      public Builder mergeStorage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto value) {
        if (storageBuilder_ == null) {
          if (((bitField0_ & 0x00000002) == 0x00000002) &&
              storage_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.getDefaultInstance()) {
            storage_ =
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.newBuilder(storage_).mergeFrom(value).buildPartial();
          } else {
            storage_ = value;
          }
          onChanged();
        } else {
          storageBuilder_.mergeFrom(value);
        }
        bitField0_ |= 0x00000002;
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.StorageInfoProto storage = 2;</code>
       */
      public Builder clearStorage() {
        if (storageBuilder_ == null) {
          storage_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.getDefaultInstance();
          onChanged();
        } else {
          storageBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000002);
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.StorageInfoProto storage = 2;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.Builder getStorageBuilder() {
        bitField0_ |= 0x00000002;
        onChanged();
        return getStorageFieldBuilder().getBuilder();
      }
      /**
       * <code>required .hadoop.hdfs.StorageInfoProto storage = 2;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProtoOrBuilder getStorageOrBuilder() {
        if (storageBuilder_ != null) {
          return storageBuilder_.getMessageOrBuilder();
        } else {
          return storage_;
        }
      }
      /**
       * <code>required .hadoop.hdfs.StorageInfoProto storage = 2;</code>
       */
      private com.google.protobuf.SingleFieldBuilder<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProtoOrBuilder>
          getStorageFieldBuilder() {
        if (storageBuilder_ == null) {
          storageBuilder_ = new com.google.protobuf.SingleFieldBuilder<
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProtoOrBuilder>(
                  storage_,
                  getParentForChildren(),
                  isClean());
          storage_ = null;
        }
        return storageBuilder_;
      }

      // required .hadoop.hdfs.StorageInfoProto prevStorage = 3;
      private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto prevStorage_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.getDefaultInstance();
      private com.google.protobuf.SingleFieldBuilder<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProtoOrBuilder> prevStorageBuilder_;
      /**
       * <code>required .hadoop.hdfs.StorageInfoProto prevStorage = 3;</code>
       */
      public boolean hasPrevStorage() {
        return ((bitField0_ & 0x00000004) == 0x00000004);
      }
      /**
       * <code>required .hadoop.hdfs.StorageInfoProto prevStorage = 3;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto getPrevStorage() {
        if (prevStorageBuilder_ == null) {
          return prevStorage_;
        } else {
          return prevStorageBuilder_.getMessage();
        }
      }
      /**
       * <code>required .hadoop.hdfs.StorageInfoProto prevStorage = 3;</code>
       */
      public Builder setPrevStorage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto value) {
        if (prevStorageBuilder_ == null) {
          if (value == null) {
            throw new NullPointerException();
          }
          prevStorage_ = value;
          onChanged();
        } else {
          prevStorageBuilder_.setMessage(value);
        }
        bitField0_ |= 0x00000004;
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.StorageInfoProto prevStorage = 3;</code>
       */
      public Builder setPrevStorage(
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.Builder builderForValue) {
        if (prevStorageBuilder_ == null) {
          prevStorage_ = builderForValue.build();
          onChanged();
        } else {
          prevStorageBuilder_.setMessage(builderForValue.build());
        }
        bitField0_ |= 0x00000004;
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.StorageInfoProto prevStorage = 3;</code>
       */
      public Builder mergePrevStorage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto value) {
        if (prevStorageBuilder_ == null) {
          if (((bitField0_ & 0x00000004) == 0x00000004) &&
              prevStorage_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.getDefaultInstance()) {
            prevStorage_ =
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.newBuilder(prevStorage_).mergeFrom(value).buildPartial();
          } else {
            prevStorage_ = value;
          }
          onChanged();
        } else {
          prevStorageBuilder_.mergeFrom(value);
        }
        bitField0_ |= 0x00000004;
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.StorageInfoProto prevStorage = 3;</code>
       */
      public Builder clearPrevStorage() {
        if (prevStorageBuilder_ == null) {
          prevStorage_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.getDefaultInstance();
          onChanged();
        } else {
          prevStorageBuilder_.clear();
        }
        bitField0_ = (bitField0_ & ~0x00000004);
        return this;
      }
      /**
       * <code>required .hadoop.hdfs.StorageInfoProto prevStorage = 3;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.Builder getPrevStorageBuilder() {
        bitField0_ |= 0x00000004;
        onChanged();
        return getPrevStorageFieldBuilder().getBuilder();
      }
      /**
       * <code>required .hadoop.hdfs.StorageInfoProto prevStorage = 3;</code>
       */
      public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProtoOrBuilder getPrevStorageOrBuilder() {
        if (prevStorageBuilder_ != null) {
          return prevStorageBuilder_.getMessageOrBuilder();
        } else {
          return prevStorage_;
        }
      }
      /**
       * <code>required .hadoop.hdfs.StorageInfoProto prevStorage = 3;</code>
       */
      private com.google.protobuf.SingleFieldBuilder<
          org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProtoOrBuilder>
          getPrevStorageFieldBuilder() {
        if (prevStorageBuilder_ == null) {
          prevStorageBuilder_ = new com.google.protobuf.SingleFieldBuilder<
              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageInfoProtoOrBuilder>(
                  prevStorage_,
                  getParentForChildren(),
                  isClean());
          prevStorage_ = null;
        }
        return prevStorageBuilder_;
      }

      // required int32 targetLayoutVersion = 4;
      private int targetLayoutVersion_;
      /**
       * <code>required int32 targetLayoutVersion = 4;</code>
       */
      public boolean hasTargetLayoutVersion() {
        return ((bitField0_ & 0x00000008) == 0x00000008);
      }
      /**
       * <code>required int32 targetLayoutVersion = 4;</code>
       */
      public int getTargetLayoutVersion() {
        return targetLayoutVersion_;
      }
      /**
       * <code>required int32 targetLayoutVersion = 4;</code>
       */
      public Builder setTargetLayoutVersion(int value) {
        bitField0_ |= 0x00000008;
        targetLayoutVersion_ = value;
        onChanged();
        return this;
      }
      /**
       * <code>required int32 targetLayoutVersion = 4;</code>
       */
      public Builder clearTargetLayoutVersion() {
        bitField0_ = (bitField0_ & ~0x00000008);
        targetLayoutVersion_ = 0;
        onChanged();
        return this;
      }

      // @@protoc_insertion_point(builder_scope:hadoop.hdfs.qjournal.CanRollBackRequestProto)
    }

    static {
      defaultInstance = new CanRollBackRequestProto(true);
      defaultInstance.initFields();
    }

    // @@protoc_insertion_point(class_scope:hadoop.hdfs.qjournal.CanRollBackRequestProto)
  }

15302      public interface CanRollBackResponseProtoOrBuilder
15303          extends com.google.protobuf.MessageOrBuilder {
15304    
15305        // required bool canRollBack = 1;
15306        /**
15307         * <code>required bool canRollBack = 1;</code>
15308         */
15309        boolean hasCanRollBack();
15310        /**
15311         * <code>required bool canRollBack = 1;</code>
15312         */
15313        boolean getCanRollBack();
15314      }
15315      /**
15316       * Protobuf type {@code hadoop.hdfs.qjournal.CanRollBackResponseProto}
15317       */
15318      public static final class CanRollBackResponseProto extends
15319          com.google.protobuf.GeneratedMessage
15320          implements CanRollBackResponseProtoOrBuilder {
15321        // Use CanRollBackResponseProto.newBuilder() to construct.
15322        private CanRollBackResponseProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
15323          super(builder);
15324          this.unknownFields = builder.getUnknownFields();
15325        }
15326        private CanRollBackResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
15327    
15328        private static final CanRollBackResponseProto defaultInstance;
15329        public static CanRollBackResponseProto getDefaultInstance() {
15330          return defaultInstance;
15331        }
15332    
15333        public CanRollBackResponseProto getDefaultInstanceForType() {
15334          return defaultInstance;
15335        }
15336    
15337        private final com.google.protobuf.UnknownFieldSet unknownFields;
15338        @java.lang.Override
15339        public final com.google.protobuf.UnknownFieldSet
15340            getUnknownFields() {
15341          return this.unknownFields;
15342        }
15343        private CanRollBackResponseProto(
15344            com.google.protobuf.CodedInputStream input,
15345            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
15346            throws com.google.protobuf.InvalidProtocolBufferException {
15347          initFields();
      int mutable_bitField0_ = 0;  // unused here: this message has no repeated fields
15349          com.google.protobuf.UnknownFieldSet.Builder unknownFields =
15350              com.google.protobuf.UnknownFieldSet.newBuilder();
15351          try {
15352            boolean done = false;
15353            while (!done) {
15354              int tag = input.readTag();
15355              switch (tag) {
15356                case 0:
15357                  done = true;
15358                  break;
15359                default: {
15360                  if (!parseUnknownField(input, unknownFields,
15361                                         extensionRegistry, tag)) {
15362                    done = true;
15363                  }
15364                  break;
15365                }
15366                case 8: {
15367                  bitField0_ |= 0x00000001;
15368                  canRollBack_ = input.readBool();
15369                  break;
15370                }
15371              }
15372            }
15373          } catch (com.google.protobuf.InvalidProtocolBufferException e) {
15374            throw e.setUnfinishedMessage(this);
15375          } catch (java.io.IOException e) {
15376            throw new com.google.protobuf.InvalidProtocolBufferException(
15377                e.getMessage()).setUnfinishedMessage(this);
15378          } finally {
15379            this.unknownFields = unknownFields.build();
15380            makeExtensionsImmutable();
15381          }
15382        }
15383        public static final com.google.protobuf.Descriptors.Descriptor
15384            getDescriptor() {
15385          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_CanRollBackResponseProto_descriptor;
15386        }
15387    
15388        protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
15389            internalGetFieldAccessorTable() {
15390          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_CanRollBackResponseProto_fieldAccessorTable
15391              .ensureFieldAccessorsInitialized(
15392                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackResponseProto.Builder.class);
15393        }
15394    
15395        public static com.google.protobuf.Parser<CanRollBackResponseProto> PARSER =
15396            new com.google.protobuf.AbstractParser<CanRollBackResponseProto>() {
15397          public CanRollBackResponseProto parsePartialFrom(
15398              com.google.protobuf.CodedInputStream input,
15399              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
15400              throws com.google.protobuf.InvalidProtocolBufferException {
15401            return new CanRollBackResponseProto(input, extensionRegistry);
15402          }
15403        };
15404    
15405        @java.lang.Override
15406        public com.google.protobuf.Parser<CanRollBackResponseProto> getParserForType() {
15407          return PARSER;
15408        }
15409    
15410        private int bitField0_;
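    // Each bit of bitField0_ records whether the corresponding field was
    // explicitly set (bit 0x01 = canRollBack).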
15411        // required bool canRollBack = 1;
15412        public static final int CANROLLBACK_FIELD_NUMBER = 1;
15413        private boolean canRollBack_;
15414        /**
15415         * <code>required bool canRollBack = 1;</code>
15416         */
15417        public boolean hasCanRollBack() {
15418          return ((bitField0_ & 0x00000001) == 0x00000001);
15419        }
15420        /**
15421         * <code>required bool canRollBack = 1;</code>
15422         */
15423        public boolean getCanRollBack() {
15424          return canRollBack_;
15425        }
15426    
15427        private void initFields() {
15428          canRollBack_ = false;
15429        }
15430        private byte memoizedIsInitialized = -1;
15431        public final boolean isInitialized() {
15432          byte isInitialized = memoizedIsInitialized;
15433          if (isInitialized != -1) return isInitialized == 1;
15434    
15435          if (!hasCanRollBack()) {
15436            memoizedIsInitialized = 0;
15437            return false;
15438          }
15439          memoizedIsInitialized = 1;
15440          return true;
15441        }
15442    
15443        public void writeTo(com.google.protobuf.CodedOutputStream output)
15444                            throws java.io.IOException {
15445          getSerializedSize();
15446          if (((bitField0_ & 0x00000001) == 0x00000001)) {
15447            output.writeBool(1, canRollBack_);
15448          }
15449          getUnknownFields().writeTo(output);
15450        }
15451    
15452        private int memoizedSerializedSize = -1;
15453        public int getSerializedSize() {
15454          int size = memoizedSerializedSize;
15455          if (size != -1) return size;
15456    
15457          size = 0;
15458          if (((bitField0_ & 0x00000001) == 0x00000001)) {
15459            size += com.google.protobuf.CodedOutputStream
15460              .computeBoolSize(1, canRollBack_);
15461          }
15462          size += getUnknownFields().getSerializedSize();
15463          memoizedSerializedSize = size;
15464          return size;
15465        }
15466    
15467        private static final long serialVersionUID = 0L;
15468        @java.lang.Override
15469        protected java.lang.Object writeReplace()
15470            throws java.io.ObjectStreamException {
15471          return super.writeReplace();
15472        }
15473    
15474        @java.lang.Override
15475        public boolean equals(final java.lang.Object obj) {
        if (obj == this) {
          return true;
        }
15479          if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackResponseProto)) {
15480            return super.equals(obj);
15481          }
15482          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackResponseProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackResponseProto) obj;
15483    
15484          boolean result = true;
15485          result = result && (hasCanRollBack() == other.hasCanRollBack());
15486          if (hasCanRollBack()) {
15487            result = result && (getCanRollBack()
15488                == other.getCanRollBack());
15489          }
15490          result = result &&
15491              getUnknownFields().equals(other.getUnknownFields());
15492          return result;
15493        }
15494    
15495        private int memoizedHashCode = 0;
15496        @java.lang.Override
15497        public int hashCode() {
15498          if (memoizedHashCode != 0) {
15499            return memoizedHashCode;
15500          }
15501          int hash = 41;
15502          hash = (19 * hash) + getDescriptorForType().hashCode();
15503          if (hasCanRollBack()) {
15504            hash = (37 * hash) + CANROLLBACK_FIELD_NUMBER;
15505            hash = (53 * hash) + hashBoolean(getCanRollBack());
15506          }
15507          hash = (29 * hash) + getUnknownFields().hashCode();
15508          memoizedHashCode = hash;
15509          return hash;
15510        }
15511    
15512        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackResponseProto parseFrom(
15513            com.google.protobuf.ByteString data)
15514            throws com.google.protobuf.InvalidProtocolBufferException {
15515          return PARSER.parseFrom(data);
15516        }
15517        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackResponseProto parseFrom(
15518            com.google.protobuf.ByteString data,
15519            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
15520            throws com.google.protobuf.InvalidProtocolBufferException {
15521          return PARSER.parseFrom(data, extensionRegistry);
15522        }
15523        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackResponseProto parseFrom(byte[] data)
15524            throws com.google.protobuf.InvalidProtocolBufferException {
15525          return PARSER.parseFrom(data);
15526        }
15527        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackResponseProto parseFrom(
15528            byte[] data,
15529            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
15530            throws com.google.protobuf.InvalidProtocolBufferException {
15531          return PARSER.parseFrom(data, extensionRegistry);
15532        }
15533        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackResponseProto parseFrom(java.io.InputStream input)
15534            throws java.io.IOException {
15535          return PARSER.parseFrom(input);
15536        }
15537        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackResponseProto parseFrom(
15538            java.io.InputStream input,
15539            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
15540            throws java.io.IOException {
15541          return PARSER.parseFrom(input, extensionRegistry);
15542        }
15543        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackResponseProto parseDelimitedFrom(java.io.InputStream input)
15544            throws java.io.IOException {
15545          return PARSER.parseDelimitedFrom(input);
15546        }
15547        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackResponseProto parseDelimitedFrom(
15548            java.io.InputStream input,
15549            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
15550            throws java.io.IOException {
15551          return PARSER.parseDelimitedFrom(input, extensionRegistry);
15552        }
15553        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackResponseProto parseFrom(
15554            com.google.protobuf.CodedInputStream input)
15555            throws java.io.IOException {
15556          return PARSER.parseFrom(input);
15557        }
15558        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackResponseProto parseFrom(
15559            com.google.protobuf.CodedInputStream input,
15560            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
15561            throws java.io.IOException {
15562          return PARSER.parseFrom(input, extensionRegistry);
15563        }
15564    
15565        public static Builder newBuilder() { return Builder.create(); }
15566        public Builder newBuilderForType() { return newBuilder(); }
15567        public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackResponseProto prototype) {
15568          return newBuilder().mergeFrom(prototype);
15569        }
15570        public Builder toBuilder() { return newBuilder(this); }
15571    
15572        @java.lang.Override
15573        protected Builder newBuilderForType(
15574            com.google.protobuf.GeneratedMessage.BuilderParent parent) {
15575          Builder builder = new Builder(parent);
15576          return builder;
15577        }
15578        /**
15579         * Protobuf type {@code hadoop.hdfs.qjournal.CanRollBackResponseProto}
15580         */
15581        public static final class Builder extends
15582            com.google.protobuf.GeneratedMessage.Builder<Builder>
15583           implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackResponseProtoOrBuilder {
15584          public static final com.google.protobuf.Descriptors.Descriptor
15585              getDescriptor() {
15586            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_CanRollBackResponseProto_descriptor;
15587          }
15588    
15589          protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
15590              internalGetFieldAccessorTable() {
15591            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_CanRollBackResponseProto_fieldAccessorTable
15592                .ensureFieldAccessorsInitialized(
15593                    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackResponseProto.Builder.class);
15594          }
15595    
15596          // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackResponseProto.newBuilder()
15597          private Builder() {
15598            maybeForceBuilderInitialization();
15599          }
15600    
15601          private Builder(
15602              com.google.protobuf.GeneratedMessage.BuilderParent parent) {
15603            super(parent);
15604            maybeForceBuilderInitialization();
15605          }
        private void maybeForceBuilderInitialization() {
          if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
            // This message has no message-typed fields, so there are no
            // nested field builders to pre-create.
          }
        }
15610          private static Builder create() {
15611            return new Builder();
15612          }
15613    
15614          public Builder clear() {
15615            super.clear();
15616            canRollBack_ = false;
15617            bitField0_ = (bitField0_ & ~0x00000001);
15618            return this;
15619          }
15620    
15621          public Builder clone() {
15622            return create().mergeFrom(buildPartial());
15623          }
15624    
15625          public com.google.protobuf.Descriptors.Descriptor
15626              getDescriptorForType() {
15627            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_CanRollBackResponseProto_descriptor;
15628          }
15629    
15630          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackResponseProto getDefaultInstanceForType() {
15631            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackResponseProto.getDefaultInstance();
15632          }
15633    
15634          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackResponseProto build() {
15635            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackResponseProto result = buildPartial();
15636            if (!result.isInitialized()) {
15637              throw newUninitializedMessageException(result);
15638            }
15639            return result;
15640          }
15641    
15642          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackResponseProto buildPartial() {
15643            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackResponseProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackResponseProto(this);
15644            int from_bitField0_ = bitField0_;
15645            int to_bitField0_ = 0;
15646            if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
15647              to_bitField0_ |= 0x00000001;
15648            }
15649            result.canRollBack_ = canRollBack_;
15650            result.bitField0_ = to_bitField0_;
15651            onBuilt();
15652            return result;
15653          }
15654    
15655          public Builder mergeFrom(com.google.protobuf.Message other) {
15656            if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackResponseProto) {
15657              return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackResponseProto)other);
15658            } else {
15659              super.mergeFrom(other);
15660              return this;
15661            }
15662          }
15663    
15664          public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackResponseProto other) {
15665            if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackResponseProto.getDefaultInstance()) return this;
15666            if (other.hasCanRollBack()) {
15667              setCanRollBack(other.getCanRollBack());
15668            }
15669            this.mergeUnknownFields(other.getUnknownFields());
15670            return this;
15671          }
15672    
        public final boolean isInitialized() {
          if (!hasCanRollBack()) {
            // Required field canRollBack is unset.
            return false;
          }
          return true;
        }
15680    
15681          public Builder mergeFrom(
15682              com.google.protobuf.CodedInputStream input,
15683              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
15684              throws java.io.IOException {
15685            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackResponseProto parsedMessage = null;
15686            try {
15687              parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
15688            } catch (com.google.protobuf.InvalidProtocolBufferException e) {
15689              parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackResponseProto) e.getUnfinishedMessage();
15690              throw e;
15691            } finally {
15692              if (parsedMessage != null) {
15693                mergeFrom(parsedMessage);
15694              }
15695            }
15696            return this;
15697          }
15698          private int bitField0_;
15699    
15700          // required bool canRollBack = 1;
15701          private boolean canRollBack_ ;
15702          /**
15703           * <code>required bool canRollBack = 1;</code>
15704           */
15705          public boolean hasCanRollBack() {
15706            return ((bitField0_ & 0x00000001) == 0x00000001);
15707          }
15708          /**
15709           * <code>required bool canRollBack = 1;</code>
15710           */
15711          public boolean getCanRollBack() {
15712            return canRollBack_;
15713          }
15714          /**
15715           * <code>required bool canRollBack = 1;</code>
15716           */
15717          public Builder setCanRollBack(boolean value) {
15718            bitField0_ |= 0x00000001;
15719            canRollBack_ = value;
15720            onChanged();
15721            return this;
15722          }
15723          /**
15724           * <code>required bool canRollBack = 1;</code>
15725           */
15726          public Builder clearCanRollBack() {
15727            bitField0_ = (bitField0_ & ~0x00000001);
15728            canRollBack_ = false;
15729            onChanged();
15730            return this;
15731          }
15732    
15733          // @@protoc_insertion_point(builder_scope:hadoop.hdfs.qjournal.CanRollBackResponseProto)
15734        }
15735    
15736        static {
15737          defaultInstance = new CanRollBackResponseProto(true);
15738          defaultInstance.initFields();
15739        }
15740    
15741        // @@protoc_insertion_point(class_scope:hadoop.hdfs.qjournal.CanRollBackResponseProto)
15742      }
15743    
15744      public interface DoRollbackRequestProtoOrBuilder
15745          extends com.google.protobuf.MessageOrBuilder {
15746    
15747        // required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;
15748        /**
15749         * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
15750         */
15751        boolean hasJid();
15752        /**
15753         * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
15754         */
15755        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid();
15756        /**
15757         * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
15758         */
15759        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder();
15760      }
15761      /**
15762       * Protobuf type {@code hadoop.hdfs.qjournal.DoRollbackRequestProto}
15763       *
15764       * <pre>
15765       **
15766       * doRollback()
15767       * </pre>
15768       */
15769      public static final class DoRollbackRequestProto extends
15770          com.google.protobuf.GeneratedMessage
15771          implements DoRollbackRequestProtoOrBuilder {
15772        // Use DoRollbackRequestProto.newBuilder() to construct.
15773        private DoRollbackRequestProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
15774          super(builder);
15775          this.unknownFields = builder.getUnknownFields();
15776        }
15777        private DoRollbackRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
15778    
15779        private static final DoRollbackRequestProto defaultInstance;
15780        public static DoRollbackRequestProto getDefaultInstance() {
15781          return defaultInstance;
15782        }
15783    
15784        public DoRollbackRequestProto getDefaultInstanceForType() {
15785          return defaultInstance;
15786        }
15787    
15788        private final com.google.protobuf.UnknownFieldSet unknownFields;
15789        @java.lang.Override
15790        public final com.google.protobuf.UnknownFieldSet
15791            getUnknownFields() {
15792          return this.unknownFields;
15793        }
15794        private DoRollbackRequestProto(
15795            com.google.protobuf.CodedInputStream input,
15796            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
15797            throws com.google.protobuf.InvalidProtocolBufferException {
15798          initFields();
      int mutable_bitField0_ = 0;  // unused here: this message has no repeated fields
15800          com.google.protobuf.UnknownFieldSet.Builder unknownFields =
15801              com.google.protobuf.UnknownFieldSet.newBuilder();
15802          try {
15803            boolean done = false;
15804            while (!done) {
15805              int tag = input.readTag();
15806              switch (tag) {
15807                case 0:
15808                  done = true;
15809                  break;
15810                default: {
15811                  if (!parseUnknownField(input, unknownFields,
15812                                         extensionRegistry, tag)) {
15813                    done = true;
15814                  }
15815                  break;
15816                }
15817                case 10: {
15818                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder subBuilder = null;
15819                  if (((bitField0_ & 0x00000001) == 0x00000001)) {
15820                    subBuilder = jid_.toBuilder();
15821                  }
15822                  jid_ = input.readMessage(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.PARSER, extensionRegistry);
15823                  if (subBuilder != null) {
15824                    subBuilder.mergeFrom(jid_);
15825                    jid_ = subBuilder.buildPartial();
15826                  }
15827                  bitField0_ |= 0x00000001;
15828                  break;
15829                }
15830              }
15831            }
15832          } catch (com.google.protobuf.InvalidProtocolBufferException e) {
15833            throw e.setUnfinishedMessage(this);
15834          } catch (java.io.IOException e) {
15835            throw new com.google.protobuf.InvalidProtocolBufferException(
15836                e.getMessage()).setUnfinishedMessage(this);
15837          } finally {
15838            this.unknownFields = unknownFields.build();
15839            makeExtensionsImmutable();
15840          }
15841        }
15842        public static final com.google.protobuf.Descriptors.Descriptor
15843            getDescriptor() {
15844          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_DoRollbackRequestProto_descriptor;
15845        }
15846    
15847        protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
15848            internalGetFieldAccessorTable() {
15849          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_DoRollbackRequestProto_fieldAccessorTable
15850              .ensureFieldAccessorsInitialized(
15851                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackRequestProto.Builder.class);
15852        }
15853    
15854        public static com.google.protobuf.Parser<DoRollbackRequestProto> PARSER =
15855            new com.google.protobuf.AbstractParser<DoRollbackRequestProto>() {
15856          public DoRollbackRequestProto parsePartialFrom(
15857              com.google.protobuf.CodedInputStream input,
15858              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
15859              throws com.google.protobuf.InvalidProtocolBufferException {
15860            return new DoRollbackRequestProto(input, extensionRegistry);
15861          }
15862        };
15863    
15864        @java.lang.Override
15865        public com.google.protobuf.Parser<DoRollbackRequestProto> getParserForType() {
15866          return PARSER;
15867        }
15868    
15869        private int bitField0_;
15870        // required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;
15871        public static final int JID_FIELD_NUMBER = 1;
15872        private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto jid_;
15873        /**
15874         * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
15875         */
15876        public boolean hasJid() {
15877          return ((bitField0_ & 0x00000001) == 0x00000001);
15878        }
15879        /**
15880         * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
15881         */
15882        public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid() {
15883          return jid_;
15884        }
15885        /**
15886         * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
15887         */
15888        public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder() {
15889          return jid_;
15890        }
15891    
15892        private void initFields() {
15893          jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
15894        }
15895        private byte memoizedIsInitialized = -1;
15896        public final boolean isInitialized() {
15897          byte isInitialized = memoizedIsInitialized;
15898          if (isInitialized != -1) return isInitialized == 1;
15899    
15900          if (!hasJid()) {
15901            memoizedIsInitialized = 0;
15902            return false;
15903          }
15904          if (!getJid().isInitialized()) {
15905            memoizedIsInitialized = 0;
15906            return false;
15907          }
15908          memoizedIsInitialized = 1;
15909          return true;
15910        }
15911    
15912        public void writeTo(com.google.protobuf.CodedOutputStream output)
15913                            throws java.io.IOException {
15914          getSerializedSize();
15915          if (((bitField0_ & 0x00000001) == 0x00000001)) {
15916            output.writeMessage(1, jid_);
15917          }
15918          getUnknownFields().writeTo(output);
15919        }
15920    
15921        private int memoizedSerializedSize = -1;
15922        public int getSerializedSize() {
15923          int size = memoizedSerializedSize;
15924          if (size != -1) return size;
15925    
15926          size = 0;
15927          if (((bitField0_ & 0x00000001) == 0x00000001)) {
15928            size += com.google.protobuf.CodedOutputStream
15929              .computeMessageSize(1, jid_);
15930          }
15931          size += getUnknownFields().getSerializedSize();
15932          memoizedSerializedSize = size;
15933          return size;
15934        }
15935    
15936        private static final long serialVersionUID = 0L;
15937        @java.lang.Override
15938        protected java.lang.Object writeReplace()
15939            throws java.io.ObjectStreamException {
15940          return super.writeReplace();
15941        }
15942    
15943        @java.lang.Override
15944        public boolean equals(final java.lang.Object obj) {
        if (obj == this) {
          return true;
        }
15948          if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackRequestProto)) {
15949            return super.equals(obj);
15950          }
15951          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackRequestProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackRequestProto) obj;
15952    
15953          boolean result = true;
15954          result = result && (hasJid() == other.hasJid());
15955          if (hasJid()) {
15956            result = result && getJid()
15957                .equals(other.getJid());
15958          }
15959          result = result &&
15960              getUnknownFields().equals(other.getUnknownFields());
15961          return result;
15962        }
15963    
15964        private int memoizedHashCode = 0;
15965        @java.lang.Override
15966        public int hashCode() {
15967          if (memoizedHashCode != 0) {
15968            return memoizedHashCode;
15969          }
15970          int hash = 41;
15971          hash = (19 * hash) + getDescriptorForType().hashCode();
15972          if (hasJid()) {
15973            hash = (37 * hash) + JID_FIELD_NUMBER;
15974            hash = (53 * hash) + getJid().hashCode();
15975          }
15976          hash = (29 * hash) + getUnknownFields().hashCode();
15977          memoizedHashCode = hash;
15978          return hash;
15979        }
15980    
15981        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackRequestProto parseFrom(
15982            com.google.protobuf.ByteString data)
15983            throws com.google.protobuf.InvalidProtocolBufferException {
15984          return PARSER.parseFrom(data);
15985        }
15986        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackRequestProto parseFrom(
15987            com.google.protobuf.ByteString data,
15988            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
15989            throws com.google.protobuf.InvalidProtocolBufferException {
15990          return PARSER.parseFrom(data, extensionRegistry);
15991        }
15992        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackRequestProto parseFrom(byte[] data)
15993            throws com.google.protobuf.InvalidProtocolBufferException {
15994          return PARSER.parseFrom(data);
15995        }
15996        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackRequestProto parseFrom(
15997            byte[] data,
15998            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
15999            throws com.google.protobuf.InvalidProtocolBufferException {
16000          return PARSER.parseFrom(data, extensionRegistry);
16001        }
16002        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackRequestProto parseFrom(java.io.InputStream input)
16003            throws java.io.IOException {
16004          return PARSER.parseFrom(input);
16005        }
16006        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackRequestProto parseFrom(
16007            java.io.InputStream input,
16008            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
16009            throws java.io.IOException {
16010          return PARSER.parseFrom(input, extensionRegistry);
16011        }
16012        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackRequestProto parseDelimitedFrom(java.io.InputStream input)
16013            throws java.io.IOException {
16014          return PARSER.parseDelimitedFrom(input);
16015        }
16016        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackRequestProto parseDelimitedFrom(
16017            java.io.InputStream input,
16018            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
16019            throws java.io.IOException {
16020          return PARSER.parseDelimitedFrom(input, extensionRegistry);
16021        }
16022        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackRequestProto parseFrom(
16023            com.google.protobuf.CodedInputStream input)
16024            throws java.io.IOException {
16025          return PARSER.parseFrom(input);
16026        }
16027        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackRequestProto parseFrom(
16028            com.google.protobuf.CodedInputStream input,
16029            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
16030            throws java.io.IOException {
16031          return PARSER.parseFrom(input, extensionRegistry);
16032        }
16033    
16034        public static Builder newBuilder() { return Builder.create(); }
16035        public Builder newBuilderForType() { return newBuilder(); }
16036        public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackRequestProto prototype) {
16037          return newBuilder().mergeFrom(prototype);
16038        }
16039        public Builder toBuilder() { return newBuilder(this); }
16040    
16041        @java.lang.Override
16042        protected Builder newBuilderForType(
16043            com.google.protobuf.GeneratedMessage.BuilderParent parent) {
16044          Builder builder = new Builder(parent);
16045          return builder;
16046        }
16047        /**
16048         * Protobuf type {@code hadoop.hdfs.qjournal.DoRollbackRequestProto}
16049         *
16050         * <pre>
16051         **
16052         * doRollback()
16053         * </pre>
16054         */
16055        public static final class Builder extends
16056            com.google.protobuf.GeneratedMessage.Builder<Builder>
16057           implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackRequestProtoOrBuilder {
16058          public static final com.google.protobuf.Descriptors.Descriptor
16059              getDescriptor() {
16060            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_DoRollbackRequestProto_descriptor;
16061          }
16062    
16063          protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
16064              internalGetFieldAccessorTable() {
16065            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_DoRollbackRequestProto_fieldAccessorTable
16066                .ensureFieldAccessorsInitialized(
16067                    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackRequestProto.Builder.class);
16068          }
16069    
16070          // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackRequestProto.newBuilder()
16071          private Builder() {
16072            maybeForceBuilderInitialization();
16073          }
16074    
16075          private Builder(
16076              com.google.protobuf.GeneratedMessage.BuilderParent parent) {
16077            super(parent);
16078            maybeForceBuilderInitialization();
16079          }
16080          private void maybeForceBuilderInitialization() {
16081            if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
16082              getJidFieldBuilder();
16083            }
16084          }
16085          private static Builder create() {
16086            return new Builder();
16087          }
16088    
16089          public Builder clear() {
16090            super.clear();
16091            if (jidBuilder_ == null) {
16092              jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
16093            } else {
16094              jidBuilder_.clear();
16095            }
16096            bitField0_ = (bitField0_ & ~0x00000001);
16097            return this;
16098          }
16099    
16100          public Builder clone() {
16101            return create().mergeFrom(buildPartial());
16102          }
16103    
16104          public com.google.protobuf.Descriptors.Descriptor
16105              getDescriptorForType() {
16106            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_DoRollbackRequestProto_descriptor;
16107          }
16108    
16109          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackRequestProto getDefaultInstanceForType() {
16110            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackRequestProto.getDefaultInstance();
16111          }
16112    
16113          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackRequestProto build() {
16114            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackRequestProto result = buildPartial();
16115            if (!result.isInitialized()) {
16116              throw newUninitializedMessageException(result);
16117            }
16118            return result;
16119          }
16120    
16121          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackRequestProto buildPartial() {
16122            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackRequestProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackRequestProto(this);
16123            int from_bitField0_ = bitField0_;
16124            int to_bitField0_ = 0;
16125            if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
16126              to_bitField0_ |= 0x00000001;
16127            }
16128            if (jidBuilder_ == null) {
16129              result.jid_ = jid_;
16130            } else {
16131              result.jid_ = jidBuilder_.build();
16132            }
16133            result.bitField0_ = to_bitField0_;
16134            onBuilt();
16135            return result;
16136          }
16137    
16138          public Builder mergeFrom(com.google.protobuf.Message other) {
16139            if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackRequestProto) {
16140              return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackRequestProto)other);
16141            } else {
16142              super.mergeFrom(other);
16143              return this;
16144            }
16145          }
16146    
16147          public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackRequestProto other) {
16148            if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackRequestProto.getDefaultInstance()) return this;
16149            if (other.hasJid()) {
16150              mergeJid(other.getJid());
16151            }
16152            this.mergeUnknownFields(other.getUnknownFields());
16153            return this;
16154          }
16155    
        public final boolean isInitialized() {
          if (!hasJid()) {
            // Required field jid is unset.
            return false;
          }
          if (!getJid().isInitialized()) {
            // The embedded JournalIdProto is missing its required identifier.
            return false;
          }
          return true;
        }
16167    
16168          public Builder mergeFrom(
16169              com.google.protobuf.CodedInputStream input,
16170              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
16171              throws java.io.IOException {
16172            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackRequestProto parsedMessage = null;
16173            try {
16174              parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
16175            } catch (com.google.protobuf.InvalidProtocolBufferException e) {
16176              parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackRequestProto) e.getUnfinishedMessage();
16177              throw e;
16178            } finally {
16179              if (parsedMessage != null) {
16180                mergeFrom(parsedMessage);
16181              }
16182            }
16183            return this;
16184          }
16185          private int bitField0_;
16186    
16187          // required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;
16188          private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
16189          private com.google.protobuf.SingleFieldBuilder<
16190              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder> jidBuilder_;
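        // Invariant: exactly one of jid_ and jidBuilder_ is authoritative.
        // Once getJidFieldBuilder() is called, jid_ is nulled and all access
        // goes through the builder.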
16191          /**
16192           * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
16193           */
16194          public boolean hasJid() {
16195            return ((bitField0_ & 0x00000001) == 0x00000001);
16196          }
16197          /**
16198           * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
16199           */
16200          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid() {
16201            if (jidBuilder_ == null) {
16202              return jid_;
16203            } else {
16204              return jidBuilder_.getMessage();
16205            }
16206          }
16207          /**
16208           * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
16209           */
16210          public Builder setJid(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto value) {
16211            if (jidBuilder_ == null) {
16212              if (value == null) {
16213                throw new NullPointerException();
16214              }
16215              jid_ = value;
16216              onChanged();
16217            } else {
16218              jidBuilder_.setMessage(value);
16219            }
16220            bitField0_ |= 0x00000001;
16221            return this;
16222          }
16223          /**
16224           * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
16225           */
16226          public Builder setJid(
16227              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder builderForValue) {
16228            if (jidBuilder_ == null) {
16229              jid_ = builderForValue.build();
16230              onChanged();
16231            } else {
16232              jidBuilder_.setMessage(builderForValue.build());
16233            }
16234            bitField0_ |= 0x00000001;
16235            return this;
16236          }
16237          /**
16238           * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
16239           */
16240          public Builder mergeJid(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto value) {
16241            if (jidBuilder_ == null) {
16242              if (((bitField0_ & 0x00000001) == 0x00000001) &&
16243                  jid_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance()) {
16244                jid_ =
16245                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.newBuilder(jid_).mergeFrom(value).buildPartial();
16246              } else {
16247                jid_ = value;
16248              }
16249              onChanged();
16250            } else {
16251              jidBuilder_.mergeFrom(value);
16252            }
16253            bitField0_ |= 0x00000001;
16254            return this;
16255          }
16256          /**
16257           * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
16258           */
16259          public Builder clearJid() {
16260            if (jidBuilder_ == null) {
16261              jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
16262              onChanged();
16263            } else {
16264              jidBuilder_.clear();
16265            }
16266            bitField0_ = (bitField0_ & ~0x00000001);
16267            return this;
16268          }
16269          /**
16270           * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
16271           */
16272          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder getJidBuilder() {
16273            bitField0_ |= 0x00000001;
16274            onChanged();
16275            return getJidFieldBuilder().getBuilder();
16276          }
16277          /**
16278           * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
16279           */
16280          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder() {
16281            if (jidBuilder_ != null) {
16282              return jidBuilder_.getMessageOrBuilder();
16283            } else {
16284              return jid_;
16285            }
16286          }
16287          /**
16288           * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
16289           */
16290          private com.google.protobuf.SingleFieldBuilder<
16291              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder> 
16292              getJidFieldBuilder() {
16293            if (jidBuilder_ == null) {
16294              jidBuilder_ = new com.google.protobuf.SingleFieldBuilder<
16295                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder>(
16296                      jid_,
16297                      getParentForChildren(),
16298                      isClean());
16299              jid_ = null;
16300            }
16301            return jidBuilder_;
16302          }
16303    
16304          // @@protoc_insertion_point(builder_scope:hadoop.hdfs.qjournal.DoRollbackRequestProto)
16305        }
16306    
16307        static {
16308          defaultInstance = new DoRollbackRequestProto(true);
16309          defaultInstance.initFields();
16310        }
16311    
16312        // @@protoc_insertion_point(class_scope:hadoop.hdfs.qjournal.DoRollbackRequestProto)
16313      }
16314    
  public interface DoRollbackResponseProtoOrBuilder
      extends com.google.protobuf.MessageOrBuilder {
    // doRollback() carries no response payload, so no fields are declared.
  }
16318      /**
16319       * Protobuf type {@code hadoop.hdfs.qjournal.DoRollbackResponseProto}
16320       */
16321      public static final class DoRollbackResponseProto extends
16322          com.google.protobuf.GeneratedMessage
16323          implements DoRollbackResponseProtoOrBuilder {
16324        // Use DoRollbackResponseProto.newBuilder() to construct.
16325        private DoRollbackResponseProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
16326          super(builder);
16327          this.unknownFields = builder.getUnknownFields();
16328        }
16329        private DoRollbackResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
16330    
16331        private static final DoRollbackResponseProto defaultInstance;
16332        public static DoRollbackResponseProto getDefaultInstance() {
16333          return defaultInstance;
16334        }
16335    
16336        public DoRollbackResponseProto getDefaultInstanceForType() {
16337          return defaultInstance;
16338        }
16339    
16340        private final com.google.protobuf.UnknownFieldSet unknownFields;
16341        @java.lang.Override
16342        public final com.google.protobuf.UnknownFieldSet
16343            getUnknownFields() {
16344          return this.unknownFields;
16345        }
16346        private DoRollbackResponseProto(
16347            com.google.protobuf.CodedInputStream input,
16348            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
16349            throws com.google.protobuf.InvalidProtocolBufferException {
16350          initFields();
16351          com.google.protobuf.UnknownFieldSet.Builder unknownFields =
16352              com.google.protobuf.UnknownFieldSet.newBuilder();
16353          try {
16354            boolean done = false;
16355            while (!done) {
16356              int tag = input.readTag();
16357              switch (tag) {
16358                case 0:
16359                  done = true;
16360                  break;
16361                default: {
16362                  if (!parseUnknownField(input, unknownFields,
16363                                         extensionRegistry, tag)) {
16364                    done = true;
16365                  }
16366                  break;
16367                }
16368              }
16369            }
16370          } catch (com.google.protobuf.InvalidProtocolBufferException e) {
16371            throw e.setUnfinishedMessage(this);
16372          } catch (java.io.IOException e) {
16373            throw new com.google.protobuf.InvalidProtocolBufferException(
16374                e.getMessage()).setUnfinishedMessage(this);
16375          } finally {
16376            this.unknownFields = unknownFields.build();
16377            makeExtensionsImmutable();
16378          }
16379        }
16380        public static final com.google.protobuf.Descriptors.Descriptor
16381            getDescriptor() {
16382          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_DoRollbackResponseProto_descriptor;
16383        }
16384    
16385        protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
16386            internalGetFieldAccessorTable() {
16387          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_DoRollbackResponseProto_fieldAccessorTable
16388              .ensureFieldAccessorsInitialized(
16389                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackResponseProto.Builder.class);
16390        }
16391    
16392        public static com.google.protobuf.Parser<DoRollbackResponseProto> PARSER =
16393            new com.google.protobuf.AbstractParser<DoRollbackResponseProto>() {
16394          public DoRollbackResponseProto parsePartialFrom(
16395              com.google.protobuf.CodedInputStream input,
16396              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
16397              throws com.google.protobuf.InvalidProtocolBufferException {
16398            return new DoRollbackResponseProto(input, extensionRegistry);
16399          }
16400        };
16401    
16402        @java.lang.Override
16403        public com.google.protobuf.Parser<DoRollbackResponseProto> getParserForType() {
16404          return PARSER;
16405        }
16406    
16407        private void initFields() {
16408        }
16409        private byte memoizedIsInitialized = -1;
16410        public final boolean isInitialized() {
16411          byte isInitialized = memoizedIsInitialized;
16412          if (isInitialized != -1) return isInitialized == 1;
16413    
16414          memoizedIsInitialized = 1;
16415          return true;
16416        }
16417    
16418        public void writeTo(com.google.protobuf.CodedOutputStream output)
16419                            throws java.io.IOException {
16420          getSerializedSize();
16421          getUnknownFields().writeTo(output);
16422        }
16423    
16424        private int memoizedSerializedSize = -1;
16425        public int getSerializedSize() {
16426          int size = memoizedSerializedSize;
16427          if (size != -1) return size;
16428    
16429          size = 0;
16430          size += getUnknownFields().getSerializedSize();
16431          memoizedSerializedSize = size;
16432          return size;
16433        }
16434    
16435        private static final long serialVersionUID = 0L;
16436        @java.lang.Override
16437        protected java.lang.Object writeReplace()
16438            throws java.io.ObjectStreamException {
16439          return super.writeReplace();
16440        }
16441    
16442        @java.lang.Override
16443        public boolean equals(final java.lang.Object obj) {
        if (obj == this) {
          return true;
        }
16447          if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackResponseProto)) {
16448            return super.equals(obj);
16449          }
16450          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackResponseProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackResponseProto) obj;
16451    
16452          boolean result = true;
16453          result = result &&
16454              getUnknownFields().equals(other.getUnknownFields());
16455          return result;
16456        }
16457    
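    // hashCode() is memoized; 0 doubles as the "not yet computed" sentinel, so
    // in the unlikely case the real hash is 0 it is simply recomputed per call.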
16458        private int memoizedHashCode = 0;
16459        @java.lang.Override
16460        public int hashCode() {
16461          if (memoizedHashCode != 0) {
16462            return memoizedHashCode;
16463          }
16464          int hash = 41;
16465          hash = (19 * hash) + getDescriptorForType().hashCode();
16466          hash = (29 * hash) + getUnknownFields().hashCode();
16467          memoizedHashCode = hash;
16468          return hash;
16469        }
16470    
16471        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackResponseProto parseFrom(
16472            com.google.protobuf.ByteString data)
16473            throws com.google.protobuf.InvalidProtocolBufferException {
16474          return PARSER.parseFrom(data);
16475        }
16476        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackResponseProto parseFrom(
16477            com.google.protobuf.ByteString data,
16478            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
16479            throws com.google.protobuf.InvalidProtocolBufferException {
16480          return PARSER.parseFrom(data, extensionRegistry);
16481        }
16482        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackResponseProto parseFrom(byte[] data)
16483            throws com.google.protobuf.InvalidProtocolBufferException {
16484          return PARSER.parseFrom(data);
16485        }
16486        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackResponseProto parseFrom(
16487            byte[] data,
16488            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
16489            throws com.google.protobuf.InvalidProtocolBufferException {
16490          return PARSER.parseFrom(data, extensionRegistry);
16491        }
16492        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackResponseProto parseFrom(java.io.InputStream input)
16493            throws java.io.IOException {
16494          return PARSER.parseFrom(input);
16495        }
16496        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackResponseProto parseFrom(
16497            java.io.InputStream input,
16498            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
16499            throws java.io.IOException {
16500          return PARSER.parseFrom(input, extensionRegistry);
16501        }
16502        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackResponseProto parseDelimitedFrom(java.io.InputStream input)
16503            throws java.io.IOException {
16504          return PARSER.parseDelimitedFrom(input);
16505        }
16506        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackResponseProto parseDelimitedFrom(
16507            java.io.InputStream input,
16508            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
16509            throws java.io.IOException {
16510          return PARSER.parseDelimitedFrom(input, extensionRegistry);
16511        }
16512        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackResponseProto parseFrom(
16513            com.google.protobuf.CodedInputStream input)
16514            throws java.io.IOException {
16515          return PARSER.parseFrom(input);
16516        }
16517        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackResponseProto parseFrom(
16518            com.google.protobuf.CodedInputStream input,
16519            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
16520            throws java.io.IOException {
16521          return PARSER.parseFrom(input, extensionRegistry);
16522        }
16523    
16524        public static Builder newBuilder() { return Builder.create(); }
16525        public Builder newBuilderForType() { return newBuilder(); }
16526        public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackResponseProto prototype) {
16527          return newBuilder().mergeFrom(prototype);
16528        }
16529        public Builder toBuilder() { return newBuilder(this); }
16530    
16531        @java.lang.Override
16532        protected Builder newBuilderForType(
16533            com.google.protobuf.GeneratedMessage.BuilderParent parent) {
16534          Builder builder = new Builder(parent);
16535          return builder;
16536        }
16537        /**
16538         * Protobuf type {@code hadoop.hdfs.qjournal.DoRollbackResponseProto}
16539         */
16540        public static final class Builder extends
16541            com.google.protobuf.GeneratedMessage.Builder<Builder>
16542           implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackResponseProtoOrBuilder {
16543          public static final com.google.protobuf.Descriptors.Descriptor
16544              getDescriptor() {
16545            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_DoRollbackResponseProto_descriptor;
16546          }
16547    
16548          protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
16549              internalGetFieldAccessorTable() {
16550            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_DoRollbackResponseProto_fieldAccessorTable
16551                .ensureFieldAccessorsInitialized(
16552                    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackResponseProto.Builder.class);
16553          }
16554    
16555          // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackResponseProto.newBuilder()
16556          private Builder() {
16557            maybeForceBuilderInitialization();
16558          }
16559    
16560          private Builder(
16561              com.google.protobuf.GeneratedMessage.BuilderParent parent) {
16562            super(parent);
16563            maybeForceBuilderInitialization();
16564          }
16565          private void maybeForceBuilderInitialization() {
16566            if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
16567            }
16568          }
16569          private static Builder create() {
16570            return new Builder();
16571          }
16572    
16573          public Builder clear() {
16574            super.clear();
16575            return this;
16576          }
16577    
16578          public Builder clone() {
16579            return create().mergeFrom(buildPartial());
16580          }
16581    
16582          public com.google.protobuf.Descriptors.Descriptor
16583              getDescriptorForType() {
16584            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_DoRollbackResponseProto_descriptor;
16585          }
16586    
16587          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackResponseProto getDefaultInstanceForType() {
16588            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackResponseProto.getDefaultInstance();
16589          }
16590    
16591          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackResponseProto build() {
16592            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackResponseProto result = buildPartial();
16593            if (!result.isInitialized()) {
16594              throw newUninitializedMessageException(result);
16595            }
16596            return result;
16597          }
16598    
16599          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackResponseProto buildPartial() {
16600            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackResponseProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackResponseProto(this);
16601            onBuilt();
16602            return result;
16603          }
16604    
16605          public Builder mergeFrom(com.google.protobuf.Message other) {
16606            if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackResponseProto) {
16607              return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackResponseProto)other);
16608            } else {
16609              super.mergeFrom(other);
16610              return this;
16611            }
16612          }
16613    
16614          public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackResponseProto other) {
16615            if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackResponseProto.getDefaultInstance()) return this;
16616            this.mergeUnknownFields(other.getUnknownFields());
16617            return this;
16618          }
16619    
16620          public final boolean isInitialized() {
16621            return true;
16622          }
16623    
16624          public Builder mergeFrom(
16625              com.google.protobuf.CodedInputStream input,
16626              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
16627              throws java.io.IOException {
16628            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackResponseProto parsedMessage = null;
16629            try {
16630              parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
16631            } catch (com.google.protobuf.InvalidProtocolBufferException e) {
16632              parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackResponseProto) e.getUnfinishedMessage();
16633              throw e;
16634            } finally {
16635              if (parsedMessage != null) {
16636                mergeFrom(parsedMessage);
16637              }
16638            }
16639            return this;
16640          }
16641    
16642          // @@protoc_insertion_point(builder_scope:hadoop.hdfs.qjournal.DoRollbackResponseProto)
16643        }
16644    
16645        static {
16646          defaultInstance = new DoRollbackResponseProto(true);
16647          defaultInstance.initFields();
16648        }
16649    
16650        // @@protoc_insertion_point(class_scope:hadoop.hdfs.qjournal.DoRollbackResponseProto)
16651      }
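  // Usage sketch (editorial note, not generated code): DoRollbackResponseProto
  // declares no fields, so a round trip only exercises the unknown-field
  // machinery. Assuming the default instance, something like this holds:
  //
  //   DoRollbackResponseProto resp = DoRollbackResponseProto.getDefaultInstance();
  //   byte[] bytes = resp.toByteArray();              // zero-length payload
  //   DoRollbackResponseProto same = DoRollbackResponseProto.parseFrom(bytes);
  //   assert same.equals(resp);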
16652    
16653      public interface GetJournalStateRequestProtoOrBuilder
16654          extends com.google.protobuf.MessageOrBuilder {
16655    
16656        // required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;
16657        /**
16658         * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
16659         */
16660        boolean hasJid();
16661        /**
16662         * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
16663         */
16664        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid();
16665        /**
16666         * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
16667         */
16668        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder();
16669      }
16670      /**
16671       * Protobuf type {@code hadoop.hdfs.qjournal.GetJournalStateRequestProto}
16672       *
16673       * <pre>
16675       * getJournalState()
16676       * </pre>
16677       */
16678      public static final class GetJournalStateRequestProto extends
16679          com.google.protobuf.GeneratedMessage
16680          implements GetJournalStateRequestProtoOrBuilder {
16681        // Use GetJournalStateRequestProto.newBuilder() to construct.
16682        private GetJournalStateRequestProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
16683          super(builder);
16684          this.unknownFields = builder.getUnknownFields();
16685        }
16686        private GetJournalStateRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
16687    
16688        private static final GetJournalStateRequestProto defaultInstance;
16689        public static GetJournalStateRequestProto getDefaultInstance() {
16690          return defaultInstance;
16691        }
16692    
16693        public GetJournalStateRequestProto getDefaultInstanceForType() {
16694          return defaultInstance;
16695        }
16696    
16697        private final com.google.protobuf.UnknownFieldSet unknownFields;
16698        @java.lang.Override
16699        public final com.google.protobuf.UnknownFieldSet
16700            getUnknownFields() {
16701          return this.unknownFields;
16702        }
16703        private GetJournalStateRequestProto(
16704            com.google.protobuf.CodedInputStream input,
16705            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
16706            throws com.google.protobuf.InvalidProtocolBufferException {
16707          initFields();
16708          int mutable_bitField0_ = 0;
16709          com.google.protobuf.UnknownFieldSet.Builder unknownFields =
16710              com.google.protobuf.UnknownFieldSet.newBuilder();
16711          try {
16712            boolean done = false;
16713            while (!done) {
16714              int tag = input.readTag();
16715              switch (tag) {
16716                case 0:
16717                  done = true;
16718                  break;
16719                default: {
16720                  if (!parseUnknownField(input, unknownFields,
16721                                         extensionRegistry, tag)) {
16722                    done = true;
16723                  }
16724                  break;
16725                }
16726                case 10: {
16727                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder subBuilder = null;
16728                  if (((bitField0_ & 0x00000001) == 0x00000001)) {
16729                    subBuilder = jid_.toBuilder();
16730                  }
16731                  jid_ = input.readMessage(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.PARSER, extensionRegistry);
16732                  if (subBuilder != null) {
16733                    subBuilder.mergeFrom(jid_);
16734                    jid_ = subBuilder.buildPartial();
16735                  }
16736                  bitField0_ |= 0x00000001;
16737                  break;
16738                }
16739              }
16740            }
16741          } catch (com.google.protobuf.InvalidProtocolBufferException e) {
16742            throw e.setUnfinishedMessage(this);
16743          } catch (java.io.IOException e) {
16744            throw new com.google.protobuf.InvalidProtocolBufferException(
16745                e.getMessage()).setUnfinishedMessage(this);
16746          } finally {
16747            this.unknownFields = unknownFields.build();
16748            makeExtensionsImmutable();
16749          }
16750        }
16751        public static final com.google.protobuf.Descriptors.Descriptor
16752            getDescriptor() {
16753          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_GetJournalStateRequestProto_descriptor;
16754        }
16755    
16756        protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
16757            internalGetFieldAccessorTable() {
16758          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_GetJournalStateRequestProto_fieldAccessorTable
16759              .ensureFieldAccessorsInitialized(
16760                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto.Builder.class);
16761        }
16762    
16763        public static com.google.protobuf.Parser<GetJournalStateRequestProto> PARSER =
16764            new com.google.protobuf.AbstractParser<GetJournalStateRequestProto>() {
16765          public GetJournalStateRequestProto parsePartialFrom(
16766              com.google.protobuf.CodedInputStream input,
16767              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
16768              throws com.google.protobuf.InvalidProtocolBufferException {
16769            return new GetJournalStateRequestProto(input, extensionRegistry);
16770          }
16771        };
16772    
16773        @java.lang.Override
16774        public com.google.protobuf.Parser<GetJournalStateRequestProto> getParserForType() {
16775          return PARSER;
16776        }
16777    
16778        private int bitField0_;
16779        // required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;
16780        public static final int JID_FIELD_NUMBER = 1;
16781        private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto jid_;
16782        /**
16783         * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
16784         */
16785        public boolean hasJid() {
16786          return ((bitField0_ & 0x00000001) == 0x00000001);
16787        }
16788        /**
16789         * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
16790         */
16791        public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid() {
16792          return jid_;
16793        }
16794        /**
16795         * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
16796         */
16797        public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder() {
16798          return jid_;
16799        }
16800    
16801        private void initFields() {
16802          jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
16803        }
16804        private byte memoizedIsInitialized = -1;
16805        public final boolean isInitialized() {
16806          byte isInitialized = memoizedIsInitialized;
16807          if (isInitialized != -1) return isInitialized == 1;
16808    
16809          if (!hasJid()) {
16810            memoizedIsInitialized = 0;
16811            return false;
16812          }
16813          if (!getJid().isInitialized()) {
16814            memoizedIsInitialized = 0;
16815            return false;
16816          }
16817          memoizedIsInitialized = 1;
16818          return true;
16819        }
16820    
16821        public void writeTo(com.google.protobuf.CodedOutputStream output)
16822                            throws java.io.IOException {
16823          getSerializedSize();
16824          if (((bitField0_ & 0x00000001) == 0x00000001)) {
16825            output.writeMessage(1, jid_);
16826          }
16827          getUnknownFields().writeTo(output);
16828        }
16829    
16830        private int memoizedSerializedSize = -1;
16831        public int getSerializedSize() {
16832          int size = memoizedSerializedSize;
16833          if (size != -1) return size;
16834    
16835          size = 0;
16836          if (((bitField0_ & 0x00000001) == 0x00000001)) {
16837            size += com.google.protobuf.CodedOutputStream
16838              .computeMessageSize(1, jid_);
16839          }
16840          size += getUnknownFields().getSerializedSize();
16841          memoizedSerializedSize = size;
16842          return size;
16843        }
16844    
16845        private static final long serialVersionUID = 0L;
16846        @java.lang.Override
16847        protected java.lang.Object writeReplace()
16848            throws java.io.ObjectStreamException {
16849          return super.writeReplace();
16850        }
16851    
16852        @java.lang.Override
16853        public boolean equals(final java.lang.Object obj) {
16854          if (obj == this) {
16855        return true;
16856          }
16857          if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto)) {
16858            return super.equals(obj);
16859          }
16860          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto) obj;
16861    
16862          boolean result = true;
16863          result = result && (hasJid() == other.hasJid());
16864          if (hasJid()) {
16865            result = result && getJid()
16866                .equals(other.getJid());
16867          }
16868          result = result &&
16869              getUnknownFields().equals(other.getUnknownFields());
16870          return result;
16871        }
16872    
16873        private int memoizedHashCode = 0;
16874        @java.lang.Override
16875        public int hashCode() {
16876          if (memoizedHashCode != 0) {
16877            return memoizedHashCode;
16878          }
16879          int hash = 41;
16880          hash = (19 * hash) + getDescriptorForType().hashCode();
16881          if (hasJid()) {
16882            hash = (37 * hash) + JID_FIELD_NUMBER;
16883            hash = (53 * hash) + getJid().hashCode();
16884          }
16885          hash = (29 * hash) + getUnknownFields().hashCode();
16886          memoizedHashCode = hash;
16887          return hash;
16888        }
16889    
16890        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto parseFrom(
16891            com.google.protobuf.ByteString data)
16892            throws com.google.protobuf.InvalidProtocolBufferException {
16893          return PARSER.parseFrom(data);
16894        }
16895        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto parseFrom(
16896            com.google.protobuf.ByteString data,
16897            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
16898            throws com.google.protobuf.InvalidProtocolBufferException {
16899          return PARSER.parseFrom(data, extensionRegistry);
16900        }
16901        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto parseFrom(byte[] data)
16902            throws com.google.protobuf.InvalidProtocolBufferException {
16903          return PARSER.parseFrom(data);
16904        }
16905        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto parseFrom(
16906            byte[] data,
16907            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
16908            throws com.google.protobuf.InvalidProtocolBufferException {
16909          return PARSER.parseFrom(data, extensionRegistry);
16910        }
16911        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto parseFrom(java.io.InputStream input)
16912            throws java.io.IOException {
16913          return PARSER.parseFrom(input);
16914        }
16915        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto parseFrom(
16916            java.io.InputStream input,
16917            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
16918            throws java.io.IOException {
16919          return PARSER.parseFrom(input, extensionRegistry);
16920        }
16921        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto parseDelimitedFrom(java.io.InputStream input)
16922            throws java.io.IOException {
16923          return PARSER.parseDelimitedFrom(input);
16924        }
16925        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto parseDelimitedFrom(
16926            java.io.InputStream input,
16927            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
16928            throws java.io.IOException {
16929          return PARSER.parseDelimitedFrom(input, extensionRegistry);
16930        }
16931        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto parseFrom(
16932            com.google.protobuf.CodedInputStream input)
16933            throws java.io.IOException {
16934          return PARSER.parseFrom(input);
16935        }
16936        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto parseFrom(
16937            com.google.protobuf.CodedInputStream input,
16938            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
16939            throws java.io.IOException {
16940          return PARSER.parseFrom(input, extensionRegistry);
16941        }
16942    
16943        public static Builder newBuilder() { return Builder.create(); }
16944        public Builder newBuilderForType() { return newBuilder(); }
16945        public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto prototype) {
16946          return newBuilder().mergeFrom(prototype);
16947        }
16948        public Builder toBuilder() { return newBuilder(this); }
16949    
16950        @java.lang.Override
16951        protected Builder newBuilderForType(
16952            com.google.protobuf.GeneratedMessage.BuilderParent parent) {
16953          Builder builder = new Builder(parent);
16954          return builder;
16955        }
16956        /**
16957         * Protobuf type {@code hadoop.hdfs.qjournal.GetJournalStateRequestProto}
16958         *
16959         * <pre>
16961         * getJournalState()
16962         * </pre>
16963         */
16964        public static final class Builder extends
16965            com.google.protobuf.GeneratedMessage.Builder<Builder>
16966           implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProtoOrBuilder {
16967          public static final com.google.protobuf.Descriptors.Descriptor
16968              getDescriptor() {
16969            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_GetJournalStateRequestProto_descriptor;
16970          }
16971    
16972          protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
16973              internalGetFieldAccessorTable() {
16974            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_GetJournalStateRequestProto_fieldAccessorTable
16975                .ensureFieldAccessorsInitialized(
16976                    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto.Builder.class);
16977          }
16978    
16979          // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto.newBuilder()
16980          private Builder() {
16981            maybeForceBuilderInitialization();
16982          }
16983    
16984          private Builder(
16985              com.google.protobuf.GeneratedMessage.BuilderParent parent) {
16986            super(parent);
16987            maybeForceBuilderInitialization();
16988          }
16989          private void maybeForceBuilderInitialization() {
16990            if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
16991              getJidFieldBuilder();
16992            }
16993          }
16994          private static Builder create() {
16995            return new Builder();
16996          }
16997    
16998          public Builder clear() {
16999            super.clear();
17000            if (jidBuilder_ == null) {
17001              jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
17002            } else {
17003              jidBuilder_.clear();
17004            }
17005            bitField0_ = (bitField0_ & ~0x00000001);
17006            return this;
17007          }
17008    
17009          public Builder clone() {
17010            return create().mergeFrom(buildPartial());
17011          }
17012    
17013          public com.google.protobuf.Descriptors.Descriptor
17014              getDescriptorForType() {
17015            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_GetJournalStateRequestProto_descriptor;
17016          }
17017    
17018          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto getDefaultInstanceForType() {
17019            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto.getDefaultInstance();
17020          }
17021    
17022          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto build() {
17023            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto result = buildPartial();
17024            if (!result.isInitialized()) {
17025              throw newUninitializedMessageException(result);
17026            }
17027            return result;
17028          }
17029    
17030          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto buildPartial() {
17031            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto(this);
17032            int from_bitField0_ = bitField0_;
17033            int to_bitField0_ = 0;
17034            if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
17035              to_bitField0_ |= 0x00000001;
17036            }
17037            if (jidBuilder_ == null) {
17038              result.jid_ = jid_;
17039            } else {
17040              result.jid_ = jidBuilder_.build();
17041            }
17042            result.bitField0_ = to_bitField0_;
17043            onBuilt();
17044            return result;
17045          }
17046    
17047          public Builder mergeFrom(com.google.protobuf.Message other) {
17048            if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto) {
17049              return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto)other);
17050            } else {
17051              super.mergeFrom(other);
17052              return this;
17053            }
17054          }
17055    
17056          public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto other) {
17057            if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto.getDefaultInstance()) return this;
17058            if (other.hasJid()) {
17059              mergeJid(other.getJid());
17060            }
17061            this.mergeUnknownFields(other.getUnknownFields());
17062            return this;
17063          }
17064    
17065          public final boolean isInitialized() {
17066            if (!hasJid()) {
17068              return false;
17069            }
17070            if (!getJid().isInitialized()) {
17072              return false;
17073            }
17074            return true;
17075          }
17076    
17077          public Builder mergeFrom(
17078              com.google.protobuf.CodedInputStream input,
17079              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
17080              throws java.io.IOException {
17081            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto parsedMessage = null;
17082            try {
17083              parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
17084            } catch (com.google.protobuf.InvalidProtocolBufferException e) {
17085              parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto) e.getUnfinishedMessage();
17086              throw e;
17087            } finally {
17088              if (parsedMessage != null) {
17089                mergeFrom(parsedMessage);
17090              }
17091            }
17092            return this;
17093          }
17094          private int bitField0_;
17095    
17096          // required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;
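      // While jidBuilder_ is null the field value lives in jid_ directly; once
      // getJidFieldBuilder() is first called, jidBuilder_ takes ownership and
      // jid_ is nulled out (see getJidFieldBuilder() below).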
17097          private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
17098          private com.google.protobuf.SingleFieldBuilder<
17099              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder> jidBuilder_;
17100          /**
17101           * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
17102           */
17103          public boolean hasJid() {
17104            return ((bitField0_ & 0x00000001) == 0x00000001);
17105          }
17106          /**
17107           * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
17108           */
17109          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid() {
17110            if (jidBuilder_ == null) {
17111              return jid_;
17112            } else {
17113              return jidBuilder_.getMessage();
17114            }
17115          }
17116          /**
17117           * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
17118           */
17119          public Builder setJid(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto value) {
17120            if (jidBuilder_ == null) {
17121              if (value == null) {
17122                throw new NullPointerException();
17123              }
17124              jid_ = value;
17125              onChanged();
17126            } else {
17127              jidBuilder_.setMessage(value);
17128            }
17129            bitField0_ |= 0x00000001;
17130            return this;
17131          }
17132          /**
17133           * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
17134           */
17135          public Builder setJid(
17136              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder builderForValue) {
17137            if (jidBuilder_ == null) {
17138              jid_ = builderForValue.build();
17139              onChanged();
17140            } else {
17141              jidBuilder_.setMessage(builderForValue.build());
17142            }
17143            bitField0_ |= 0x00000001;
17144            return this;
17145          }
17146          /**
17147           * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
17148           */
17149          public Builder mergeJid(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto value) {
17150            if (jidBuilder_ == null) {
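          // Merge semantics for a singular message field: if a jid is already
          // set and is not the default instance, combine the two messages
          // field by field; otherwise adopt the incoming value outright.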
17151              if (((bitField0_ & 0x00000001) == 0x00000001) &&
17152                  jid_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance()) {
17153                jid_ =
17154                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.newBuilder(jid_).mergeFrom(value).buildPartial();
17155              } else {
17156                jid_ = value;
17157              }
17158              onChanged();
17159            } else {
17160              jidBuilder_.mergeFrom(value);
17161            }
17162            bitField0_ |= 0x00000001;
17163            return this;
17164          }
17165          /**
17166           * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
17167           */
17168          public Builder clearJid() {
17169            if (jidBuilder_ == null) {
17170              jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
17171              onChanged();
17172            } else {
17173              jidBuilder_.clear();
17174            }
17175            bitField0_ = (bitField0_ & ~0x00000001);
17176            return this;
17177          }
17178          /**
17179           * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
17180           */
17181          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder getJidBuilder() {
17182            bitField0_ |= 0x00000001;
17183            onChanged();
17184            return getJidFieldBuilder().getBuilder();
17185          }
17186          /**
17187           * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
17188           */
17189          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder() {
17190            if (jidBuilder_ != null) {
17191              return jidBuilder_.getMessageOrBuilder();
17192            } else {
17193              return jid_;
17194            }
17195          }
17196          /**
17197           * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
17198           */
17199          private com.google.protobuf.SingleFieldBuilder<
17200              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder> 
17201              getJidFieldBuilder() {
17202            if (jidBuilder_ == null) {
17203              jidBuilder_ = new com.google.protobuf.SingleFieldBuilder<
17204                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder>(
17205                      jid_,
17206                      getParentForChildren(),
17207                      isClean());
17208              jid_ = null;
17209            }
17210            return jidBuilder_;
17211          }
17212    
17213          // @@protoc_insertion_point(builder_scope:hadoop.hdfs.qjournal.GetJournalStateRequestProto)
17214        }
17215    
17216        static {
17217          defaultInstance = new GetJournalStateRequestProto(true);
17218          defaultInstance.initFields();
17219        }
17220    
17221        // @@protoc_insertion_point(class_scope:hadoop.hdfs.qjournal.GetJournalStateRequestProto)
17222      }
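  // Usage sketch (editorial note, not generated code): a request is built from
  // a journal identifier; the name "myjournal" below is hypothetical.
  //
  //   JournalIdProto jid = JournalIdProto.newBuilder()
  //       .setIdentifier("myjournal")
  //       .build();
  //   GetJournalStateRequestProto req = GetJournalStateRequestProto.newBuilder()
  //       .setJid(jid)
  //       .build();   // build() throws if the required jid were left unset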
17223    
17224      public interface GetJournalStateResponseProtoOrBuilder
17225          extends com.google.protobuf.MessageOrBuilder {
17226    
17227        // required uint64 lastPromisedEpoch = 1;
17228        /**
17229         * <code>required uint64 lastPromisedEpoch = 1;</code>
17230         */
17231        boolean hasLastPromisedEpoch();
17232        /**
17233         * <code>required uint64 lastPromisedEpoch = 1;</code>
17234         */
17235        long getLastPromisedEpoch();
17236    
17237        // required uint32 httpPort = 2;
17238        /**
17239         * <code>required uint32 httpPort = 2;</code>
17240         *
17241         * <pre>
17242         * Deprecated by fromURL
17243         * </pre>
17244         */
17245        boolean hasHttpPort();
17246        /**
17247         * <code>required uint32 httpPort = 2;</code>
17248         *
17249         * <pre>
17250         * Deprecated by fromURL
17251         * </pre>
17252         */
17253        int getHttpPort();
17254    
17255        // optional string fromURL = 3;
17256        /**
17257         * <code>optional string fromURL = 3;</code>
17258         */
17259        boolean hasFromURL();
17260        /**
17261         * <code>optional string fromURL = 3;</code>
17262         */
17263        java.lang.String getFromURL();
17264        /**
17265         * <code>optional string fromURL = 3;</code>
17266         */
17267        com.google.protobuf.ByteString
17268            getFromURLBytes();
17269      }
17270      /**
17271       * Protobuf type {@code hadoop.hdfs.qjournal.GetJournalStateResponseProto}
17272       */
17273      public static final class GetJournalStateResponseProto extends
17274          com.google.protobuf.GeneratedMessage
17275          implements GetJournalStateResponseProtoOrBuilder {
17276        // Use GetJournalStateResponseProto.newBuilder() to construct.
17277        private GetJournalStateResponseProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
17278          super(builder);
17279          this.unknownFields = builder.getUnknownFields();
17280        }
17281        private GetJournalStateResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
17282    
17283        private static final GetJournalStateResponseProto defaultInstance;
17284        public static GetJournalStateResponseProto getDefaultInstance() {
17285          return defaultInstance;
17286        }
17287    
17288        public GetJournalStateResponseProto getDefaultInstanceForType() {
17289          return defaultInstance;
17290        }
17291    
17292        private final com.google.protobuf.UnknownFieldSet unknownFields;
17293        @java.lang.Override
17294        public final com.google.protobuf.UnknownFieldSet
17295            getUnknownFields() {
17296          return this.unknownFields;
17297        }
17298        private GetJournalStateResponseProto(
17299            com.google.protobuf.CodedInputStream input,
17300            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
17301            throws com.google.protobuf.InvalidProtocolBufferException {
17302          initFields();
17303          int mutable_bitField0_ = 0;
17304          com.google.protobuf.UnknownFieldSet.Builder unknownFields =
17305              com.google.protobuf.UnknownFieldSet.newBuilder();
17306          try {
17307            boolean done = false;
17308            while (!done) {
17309              int tag = input.readTag();
17310              switch (tag) {
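            // A tag encodes (field_number << 3) | wire_type: 8 is field 1 as a
            // varint, 16 is field 2 as a varint, 26 is field 3 length-delimited.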
17311                case 0:
17312                  done = true;
17313                  break;
17314                default: {
17315                  if (!parseUnknownField(input, unknownFields,
17316                                         extensionRegistry, tag)) {
17317                    done = true;
17318                  }
17319                  break;
17320                }
17321                case 8: {
17322                  bitField0_ |= 0x00000001;
17323                  lastPromisedEpoch_ = input.readUInt64();
17324                  break;
17325                }
17326                case 16: {
17327                  bitField0_ |= 0x00000002;
17328                  httpPort_ = input.readUInt32();
17329                  break;
17330                }
17331                case 26: {
17332                  bitField0_ |= 0x00000004;
17333                  fromURL_ = input.readBytes();
17334                  break;
17335                }
17336              }
17337            }
17338          } catch (com.google.protobuf.InvalidProtocolBufferException e) {
17339            throw e.setUnfinishedMessage(this);
17340          } catch (java.io.IOException e) {
17341            throw new com.google.protobuf.InvalidProtocolBufferException(
17342                e.getMessage()).setUnfinishedMessage(this);
17343          } finally {
17344            this.unknownFields = unknownFields.build();
17345            makeExtensionsImmutable();
17346          }
17347        }
17348        public static final com.google.protobuf.Descriptors.Descriptor
17349            getDescriptor() {
17350          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_GetJournalStateResponseProto_descriptor;
17351        }
17352    
17353        protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
17354            internalGetFieldAccessorTable() {
17355          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_GetJournalStateResponseProto_fieldAccessorTable
17356              .ensureFieldAccessorsInitialized(
17357                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto.Builder.class);
17358        }
17359    
17360        public static com.google.protobuf.Parser<GetJournalStateResponseProto> PARSER =
17361            new com.google.protobuf.AbstractParser<GetJournalStateResponseProto>() {
17362          public GetJournalStateResponseProto parsePartialFrom(
17363              com.google.protobuf.CodedInputStream input,
17364              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
17365              throws com.google.protobuf.InvalidProtocolBufferException {
17366            return new GetJournalStateResponseProto(input, extensionRegistry);
17367          }
17368        };
17369    
17370        @java.lang.Override
17371        public com.google.protobuf.Parser<GetJournalStateResponseProto> getParserForType() {
17372          return PARSER;
17373        }
17374    
17375        private int bitField0_;
17376        // required uint64 lastPromisedEpoch = 1;
17377        public static final int LASTPROMISEDEPOCH_FIELD_NUMBER = 1;
17378        private long lastPromisedEpoch_;
17379        /**
17380         * <code>required uint64 lastPromisedEpoch = 1;</code>
17381         */
17382        public boolean hasLastPromisedEpoch() {
17383          return ((bitField0_ & 0x00000001) == 0x00000001);
17384        }
17385        /**
17386         * <code>required uint64 lastPromisedEpoch = 1;</code>
17387         */
17388        public long getLastPromisedEpoch() {
17389          return lastPromisedEpoch_;
17390        }
17391    
17392        // required uint32 httpPort = 2;
17393        public static final int HTTPPORT_FIELD_NUMBER = 2;
17394        private int httpPort_;
17395        /**
17396         * <code>required uint32 httpPort = 2;</code>
17397         *
17398         * <pre>
17399         * Deprecated by fromURL
17400         * </pre>
17401         */
17402        public boolean hasHttpPort() {
17403          return ((bitField0_ & 0x00000002) == 0x00000002);
17404        }
17405        /**
17406         * <code>required uint32 httpPort = 2;</code>
17407         *
17408         * <pre>
17409         * Deprecated by fromURL
17410         * </pre>
17411         */
17412        public int getHttpPort() {
17413          return httpPort_;
17414        }
17415    
17416        // optional string fromURL = 3;
17417        public static final int FROMURL_FIELD_NUMBER = 3;
17418        private java.lang.Object fromURL_;
17419        /**
17420         * <code>optional string fromURL = 3;</code>
17421         */
17422        public boolean hasFromURL() {
17423          return ((bitField0_ & 0x00000004) == 0x00000004);
17424        }
17425        /**
17426         * <code>optional string fromURL = 3;</code>
17427         */
17428        public java.lang.String getFromURL() {
17429          java.lang.Object ref = fromURL_;
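          // fromURL_ holds either a String or a ByteString; decode lazily and
          // cache the decoded String only when the bytes are valid UTF-8.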
17430          if (ref instanceof java.lang.String) {
17431            return (java.lang.String) ref;
17432          } else {
17433            com.google.protobuf.ByteString bs = 
17434                (com.google.protobuf.ByteString) ref;
17435            java.lang.String s = bs.toStringUtf8();
17436            if (bs.isValidUtf8()) {
17437              fromURL_ = s;
17438            }
17439            return s;
17440          }
17441        }
17442        /**
17443         * <code>optional string fromURL = 3;</code>
17444         */
17445        public com.google.protobuf.ByteString
17446            getFromURLBytes() {
17447          java.lang.Object ref = fromURL_;
17448          if (ref instanceof java.lang.String) {
17449            com.google.protobuf.ByteString b = 
17450                com.google.protobuf.ByteString.copyFromUtf8(
17451                    (java.lang.String) ref);
17452            fromURL_ = b;
17453            return b;
17454          } else {
17455            return (com.google.protobuf.ByteString) ref;
17456          }
17457        }
17458    
17459        private void initFields() {
17460          lastPromisedEpoch_ = 0L;
17461          httpPort_ = 0;
17462          fromURL_ = "";
17463        }
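    // isInitialized() checks only the two required fields (lastPromisedEpoch
    // and httpPort); the optional fromURL is intentionally not validated.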
17464        private byte memoizedIsInitialized = -1;
17465        public final boolean isInitialized() {
17466          byte isInitialized = memoizedIsInitialized;
17467          if (isInitialized != -1) return isInitialized == 1;
17468    
17469          if (!hasLastPromisedEpoch()) {
17470            memoizedIsInitialized = 0;
17471            return false;
17472          }
17473          if (!hasHttpPort()) {
17474            memoizedIsInitialized = 0;
17475            return false;
17476          }
17477          memoizedIsInitialized = 1;
17478          return true;
17479        }
17480    
17481        public void writeTo(com.google.protobuf.CodedOutputStream output)
17482                            throws java.io.IOException {
17483          getSerializedSize();
17484          if (((bitField0_ & 0x00000001) == 0x00000001)) {
17485            output.writeUInt64(1, lastPromisedEpoch_);
17486          }
17487          if (((bitField0_ & 0x00000002) == 0x00000002)) {
17488            output.writeUInt32(2, httpPort_);
17489          }
17490          if (((bitField0_ & 0x00000004) == 0x00000004)) {
17491            output.writeBytes(3, getFromURLBytes());
17492          }
17493          getUnknownFields().writeTo(output);
17494        }
17495    
17496        private int memoizedSerializedSize = -1;
17497        public int getSerializedSize() {
17498          int size = memoizedSerializedSize;
17499          if (size != -1) return size;
17500    
17501          size = 0;
17502          if (((bitField0_ & 0x00000001) == 0x00000001)) {
17503            size += com.google.protobuf.CodedOutputStream
17504              .computeUInt64Size(1, lastPromisedEpoch_);
17505          }
17506          if (((bitField0_ & 0x00000002) == 0x00000002)) {
17507            size += com.google.protobuf.CodedOutputStream
17508              .computeUInt32Size(2, httpPort_);
17509          }
17510          if (((bitField0_ & 0x00000004) == 0x00000004)) {
17511            size += com.google.protobuf.CodedOutputStream
17512              .computeBytesSize(3, getFromURLBytes());
17513          }
17514          size += getUnknownFields().getSerializedSize();
17515          memoizedSerializedSize = size;
17516          return size;
17517        }
17518    
17519        private static final long serialVersionUID = 0L;
17520        @java.lang.Override
17521        protected java.lang.Object writeReplace()
17522            throws java.io.ObjectStreamException {
17523          return super.writeReplace();
17524        }
17525    
17526        @java.lang.Override
17527        public boolean equals(final java.lang.Object obj) {
17528          if (obj == this) {
17529        return true;
17530          }
17531          if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto)) {
17532            return super.equals(obj);
17533          }
17534          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto) obj;
17535    
17536          boolean result = true;
17537          result = result && (hasLastPromisedEpoch() == other.hasLastPromisedEpoch());
17538          if (hasLastPromisedEpoch()) {
17539            result = result && (getLastPromisedEpoch()
17540                == other.getLastPromisedEpoch());
17541          }
17542          result = result && (hasHttpPort() == other.hasHttpPort());
17543          if (hasHttpPort()) {
17544            result = result && (getHttpPort()
17545                == other.getHttpPort());
17546          }
17547          result = result && (hasFromURL() == other.hasFromURL());
17548          if (hasFromURL()) {
17549            result = result && getFromURL()
17550                .equals(other.getFromURL());
17551          }
17552          result = result &&
17553              getUnknownFields().equals(other.getUnknownFields());
17554          return result;
17555        }
17556    
17557        private int memoizedHashCode = 0;
17558        @java.lang.Override
17559        public int hashCode() {
17560          if (memoizedHashCode != 0) {
17561            return memoizedHashCode;
17562          }
17563          int hash = 41;
17564          hash = (19 * hash) + getDescriptorForType().hashCode();
17565          if (hasLastPromisedEpoch()) {
17566            hash = (37 * hash) + LASTPROMISEDEPOCH_FIELD_NUMBER;
17567            hash = (53 * hash) + hashLong(getLastPromisedEpoch());
17568          }
17569          if (hasHttpPort()) {
17570            hash = (37 * hash) + HTTPPORT_FIELD_NUMBER;
17571            hash = (53 * hash) + getHttpPort();
17572          }
17573          if (hasFromURL()) {
17574            hash = (37 * hash) + FROMURL_FIELD_NUMBER;
17575            hash = (53 * hash) + getFromURL().hashCode();
17576          }
17577          hash = (29 * hash) + getUnknownFields().hashCode();
17578          memoizedHashCode = hash;
17579          return hash;
17580        }
17581    
17582        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto parseFrom(
17583            com.google.protobuf.ByteString data)
17584            throws com.google.protobuf.InvalidProtocolBufferException {
17585          return PARSER.parseFrom(data);
17586        }
17587        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto parseFrom(
17588            com.google.protobuf.ByteString data,
17589            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
17590            throws com.google.protobuf.InvalidProtocolBufferException {
17591          return PARSER.parseFrom(data, extensionRegistry);
17592        }
17593        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto parseFrom(byte[] data)
17594            throws com.google.protobuf.InvalidProtocolBufferException {
17595          return PARSER.parseFrom(data);
17596        }
17597        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto parseFrom(
17598            byte[] data,
17599            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
17600            throws com.google.protobuf.InvalidProtocolBufferException {
17601          return PARSER.parseFrom(data, extensionRegistry);
17602        }
17603        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto parseFrom(java.io.InputStream input)
17604            throws java.io.IOException {
17605          return PARSER.parseFrom(input);
17606        }
17607        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto parseFrom(
17608            java.io.InputStream input,
17609            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
17610            throws java.io.IOException {
17611          return PARSER.parseFrom(input, extensionRegistry);
17612        }
17613        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto parseDelimitedFrom(java.io.InputStream input)
17614            throws java.io.IOException {
17615          return PARSER.parseDelimitedFrom(input);
17616        }
17617        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto parseDelimitedFrom(
17618            java.io.InputStream input,
17619            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
17620            throws java.io.IOException {
17621          return PARSER.parseDelimitedFrom(input, extensionRegistry);
17622        }
17623        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto parseFrom(
17624            com.google.protobuf.CodedInputStream input)
17625            throws java.io.IOException {
17626          return PARSER.parseFrom(input);
17627        }
17628        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto parseFrom(
17629            com.google.protobuf.CodedInputStream input,
17630            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
17631            throws java.io.IOException {
17632          return PARSER.parseFrom(input, extensionRegistry);
17633        }
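    // Usage sketch (editorial, not part of the generated output): all of the
    // parseFrom overloads above delegate to PARSER. Assuming `bytes` holds a
    // serialized message:
    //
    //   GetJournalStateResponseProto resp =
    //       GetJournalStateResponseProto.parseFrom(bytes);
    //   long epoch = resp.getLastPromisedEpoch();
    //
    // parseDelimitedFrom first reads a varint length prefix, so it can pull
    // one message out of a stream carrying several back to back.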
17634    
17635        public static Builder newBuilder() { return Builder.create(); }
17636        public Builder newBuilderForType() { return newBuilder(); }
17637        public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto prototype) {
17638          return newBuilder().mergeFrom(prototype);
17639        }
17640        public Builder toBuilder() { return newBuilder(this); }
17641    
17642        @java.lang.Override
17643        protected Builder newBuilderForType(
17644            com.google.protobuf.GeneratedMessage.BuilderParent parent) {
17645          Builder builder = new Builder(parent);
17646          return builder;
17647        }
17648        /**
17649         * Protobuf type {@code hadoop.hdfs.qjournal.GetJournalStateResponseProto}
17650         */
17651        public static final class Builder extends
17652            com.google.protobuf.GeneratedMessage.Builder<Builder>
17653           implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProtoOrBuilder {
17654          public static final com.google.protobuf.Descriptors.Descriptor
17655              getDescriptor() {
17656            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_GetJournalStateResponseProto_descriptor;
17657          }
17658    
17659          protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
17660              internalGetFieldAccessorTable() {
17661            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_GetJournalStateResponseProto_fieldAccessorTable
17662                .ensureFieldAccessorsInitialized(
17663                    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto.Builder.class);
17664          }
17665    
17666          // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto.newBuilder()
17667          private Builder() {
17668            maybeForceBuilderInitialization();
17669          }
17670    
17671          private Builder(
17672              com.google.protobuf.GeneratedMessage.BuilderParent parent) {
17673            super(parent);
17674            maybeForceBuilderInitialization();
17675          }
17676          private void maybeForceBuilderInitialization() {
17677            if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
17678            }
17679          }
17680          private static Builder create() {
17681            return new Builder();
17682          }
17683    
17684          public Builder clear() {
17685            super.clear();
17686            lastPromisedEpoch_ = 0L;
17687            bitField0_ = (bitField0_ & ~0x00000001);
17688            httpPort_ = 0;
17689            bitField0_ = (bitField0_ & ~0x00000002);
17690            fromURL_ = "";
17691            bitField0_ = (bitField0_ & ~0x00000004);
17692            return this;
17693          }
17694    
17695          public Builder clone() {
17696            return create().mergeFrom(buildPartial());
17697          }
17698    
17699          public com.google.protobuf.Descriptors.Descriptor
17700              getDescriptorForType() {
17701            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_GetJournalStateResponseProto_descriptor;
17702          }
17703    
17704          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto getDefaultInstanceForType() {
17705            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto.getDefaultInstance();
17706          }
17707    
17708          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto build() {
17709            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto result = buildPartial();
17710            if (!result.isInitialized()) {
17711              throw newUninitializedMessageException(result);
17712            }
17713            return result;
17714          }
17715    
17716          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto buildPartial() {
17717            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto(this);
17718            int from_bitField0_ = bitField0_;
17719            int to_bitField0_ = 0;
17720            if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
17721              to_bitField0_ |= 0x00000001;
17722            }
17723            result.lastPromisedEpoch_ = lastPromisedEpoch_;
17724            if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
17725              to_bitField0_ |= 0x00000002;
17726            }
17727            result.httpPort_ = httpPort_;
17728            if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
17729              to_bitField0_ |= 0x00000004;
17730            }
17731            result.fromURL_ = fromURL_;
17732            result.bitField0_ = to_bitField0_;
17733            onBuilt();
17734            return result;
17735          }
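      // Note (editorial, not part of the generated output): build() enforces
      // required fields via isInitialized() and throws on a violation;
      // buildPartial() skips that check, and the stream-based mergeFrom below
      // likewise uses PARSER.parsePartialFrom so it can merge whatever was
      // read before a parse failure.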
17736    
17737          public Builder mergeFrom(com.google.protobuf.Message other) {
17738            if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto) {
17739              return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto)other);
17740            } else {
17741              super.mergeFrom(other);
17742              return this;
17743            }
17744          }
17745    
17746          public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto other) {
17747            if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto.getDefaultInstance()) return this;
17748            if (other.hasLastPromisedEpoch()) {
17749              setLastPromisedEpoch(other.getLastPromisedEpoch());
17750            }
17751            if (other.hasHttpPort()) {
17752              setHttpPort(other.getHttpPort());
17753            }
17754            if (other.hasFromURL()) {
17755              bitField0_ |= 0x00000004;
17756              fromURL_ = other.fromURL_;
17757              onChanged();
17758            }
17759            this.mergeUnknownFields(other.getUnknownFields());
17760            return this;
17761          }
17762    
      public final boolean isInitialized() {
        if (!hasLastPromisedEpoch()) {
          return false;
        }
        if (!hasHttpPort()) {
          return false;
        }
        return true;
      }
17774    
17775          public Builder mergeFrom(
17776              com.google.protobuf.CodedInputStream input,
17777              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
17778              throws java.io.IOException {
17779            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto parsedMessage = null;
17780            try {
17781              parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
17782            } catch (com.google.protobuf.InvalidProtocolBufferException e) {
17783              parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto) e.getUnfinishedMessage();
17784              throw e;
17785            } finally {
17786              if (parsedMessage != null) {
17787                mergeFrom(parsedMessage);
17788              }
17789            }
17790            return this;
17791          }
17792          private int bitField0_;
17793    
17794          // required uint64 lastPromisedEpoch = 1;
17795          private long lastPromisedEpoch_ ;
17796          /**
17797           * <code>required uint64 lastPromisedEpoch = 1;</code>
17798           */
17799          public boolean hasLastPromisedEpoch() {
17800            return ((bitField0_ & 0x00000001) == 0x00000001);
17801          }
17802          /**
17803           * <code>required uint64 lastPromisedEpoch = 1;</code>
17804           */
17805          public long getLastPromisedEpoch() {
17806            return lastPromisedEpoch_;
17807          }
17808          /**
17809           * <code>required uint64 lastPromisedEpoch = 1;</code>
17810           */
17811          public Builder setLastPromisedEpoch(long value) {
17812            bitField0_ |= 0x00000001;
17813            lastPromisedEpoch_ = value;
17814            onChanged();
17815            return this;
17816          }
17817          /**
17818           * <code>required uint64 lastPromisedEpoch = 1;</code>
17819           */
17820          public Builder clearLastPromisedEpoch() {
17821            bitField0_ = (bitField0_ & ~0x00000001);
17822            lastPromisedEpoch_ = 0L;
17823            onChanged();
17824            return this;
17825          }
17826    
17827          // required uint32 httpPort = 2;
17828          private int httpPort_ ;
17829          /**
17830           * <code>required uint32 httpPort = 2;</code>
17831           *
17832           * <pre>
17833           * Deprecated by fromURL
17834           * </pre>
17835           */
17836          public boolean hasHttpPort() {
17837            return ((bitField0_ & 0x00000002) == 0x00000002);
17838          }
17839          /**
17840           * <code>required uint32 httpPort = 2;</code>
17841           *
17842           * <pre>
17843           * Deprecated by fromURL
17844           * </pre>
17845           */
17846          public int getHttpPort() {
17847            return httpPort_;
17848          }
17849          /**
17850           * <code>required uint32 httpPort = 2;</code>
17851           *
17852           * <pre>
17853           * Deprecated by fromURL
17854           * </pre>
17855           */
17856          public Builder setHttpPort(int value) {
17857            bitField0_ |= 0x00000002;
17858            httpPort_ = value;
17859            onChanged();
17860            return this;
17861          }
17862          /**
17863           * <code>required uint32 httpPort = 2;</code>
17864           *
17865           * <pre>
17866           * Deprecated by fromURL
17867           * </pre>
17868           */
17869          public Builder clearHttpPort() {
17870            bitField0_ = (bitField0_ & ~0x00000002);
17871            httpPort_ = 0;
17872            onChanged();
17873            return this;
17874          }
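      // Note (editorial, not part of the generated output): httpPort is still
      // a required field on the wire but is documented above as deprecated by
      // fromURL; prefer getFromURL() when hasFromURL() returns true.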
17875    
17876          // optional string fromURL = 3;
17877          private java.lang.Object fromURL_ = "";
17878          /**
17879           * <code>optional string fromURL = 3;</code>
17880           */
17881          public boolean hasFromURL() {
17882            return ((bitField0_ & 0x00000004) == 0x00000004);
17883          }
17884          /**
17885           * <code>optional string fromURL = 3;</code>
17886           */
      public java.lang.String getFromURL() {
        java.lang.Object ref = fromURL_;
        if (!(ref instanceof java.lang.String)) {
          // Decode the UTF-8 bytes once and cache the resulting String so
          // subsequent calls return it directly.
          java.lang.String s = ((com.google.protobuf.ByteString) ref)
              .toStringUtf8();
          fromURL_ = s;
          return s;
        } else {
          return (java.lang.String) ref;
        }
      }
17898          /**
17899           * <code>optional string fromURL = 3;</code>
17900           */
      public com.google.protobuf.ByteString
          getFromURLBytes() {
        java.lang.Object ref = fromURL_;
        if (ref instanceof String) {
          // Mirror of getFromURL(): encode the String once and cache the
          // ByteString form.
          com.google.protobuf.ByteString b =
              com.google.protobuf.ByteString.copyFromUtf8(
                  (java.lang.String) ref);
          fromURL_ = b;
          return b;
        } else {
          return (com.google.protobuf.ByteString) ref;
        }
      }
17914          /**
17915           * <code>optional string fromURL = 3;</code>
17916           */
      public Builder setFromURL(
          java.lang.String value) {
        if (value == null) {
          throw new NullPointerException();
        }
        bitField0_ |= 0x00000004;
        fromURL_ = value;
        onChanged();
        return this;
      }
17927          /**
17928           * <code>optional string fromURL = 3;</code>
17929           */
17930          public Builder clearFromURL() {
17931            bitField0_ = (bitField0_ & ~0x00000004);
17932            fromURL_ = getDefaultInstance().getFromURL();
17933            onChanged();
17934            return this;
17935          }
17936          /**
17937           * <code>optional string fromURL = 3;</code>
17938           */
      public Builder setFromURLBytes(
          com.google.protobuf.ByteString value) {
        if (value == null) {
          throw new NullPointerException();
        }
        bitField0_ |= 0x00000004;
        fromURL_ = value;
        onChanged();
        return this;
      }
17949    
17950          // @@protoc_insertion_point(builder_scope:hadoop.hdfs.qjournal.GetJournalStateResponseProto)
17951        }
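    // Usage sketch (editorial, not part of the generated output): assembling
    // a response with the Builder; the values here are hypothetical.
    //
    //   GetJournalStateResponseProto resp =
    //       GetJournalStateResponseProto.newBuilder()
    //           .setLastPromisedEpoch(42L)                  // required
    //           .setHttpPort(8480)                          // required, deprecated
    //           .setFromURL("http://jn1.example.com:8480")  // optional
    //           .build();  // throws if a required field is unset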
17952    
17953        static {
17954          defaultInstance = new GetJournalStateResponseProto(true);
17955          defaultInstance.initFields();
17956        }
17957    
17958        // @@protoc_insertion_point(class_scope:hadoop.hdfs.qjournal.GetJournalStateResponseProto)
17959      }
17960    
17961      public interface FormatRequestProtoOrBuilder
17962          extends com.google.protobuf.MessageOrBuilder {
17963    
17964        // required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;
17965        /**
17966         * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
17967         */
17968        boolean hasJid();
17969        /**
17970         * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
17971         */
17972        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid();
17973        /**
17974         * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
17975         */
17976        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder();
17977    
17978        // required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;
17979        /**
17980         * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
17981         */
17982        boolean hasNsInfo();
17983        /**
17984         * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
17985         */
17986        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto getNsInfo();
17987        /**
17988         * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
17989         */
17990        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProtoOrBuilder getNsInfoOrBuilder();
17991      }
17992      /**
17993       * Protobuf type {@code hadoop.hdfs.qjournal.FormatRequestProto}
17994       *
17995       * <pre>
17996       **
17997       * format()
17998       * </pre>
17999       */
18000      public static final class FormatRequestProto extends
18001          com.google.protobuf.GeneratedMessage
18002          implements FormatRequestProtoOrBuilder {
18003        // Use FormatRequestProto.newBuilder() to construct.
18004        private FormatRequestProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
18005          super(builder);
18006          this.unknownFields = builder.getUnknownFields();
18007        }
18008        private FormatRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
18009    
18010        private static final FormatRequestProto defaultInstance;
18011        public static FormatRequestProto getDefaultInstance() {
18012          return defaultInstance;
18013        }
18014    
18015        public FormatRequestProto getDefaultInstanceForType() {
18016          return defaultInstance;
18017        }
18018    
18019        private final com.google.protobuf.UnknownFieldSet unknownFields;
18020        @java.lang.Override
18021        public final com.google.protobuf.UnknownFieldSet
18022            getUnknownFields() {
18023          return this.unknownFields;
18024        }
18025        private FormatRequestProto(
18026            com.google.protobuf.CodedInputStream input,
18027            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
18028            throws com.google.protobuf.InvalidProtocolBufferException {
18029          initFields();
18030          int mutable_bitField0_ = 0;
18031          com.google.protobuf.UnknownFieldSet.Builder unknownFields =
18032              com.google.protobuf.UnknownFieldSet.newBuilder();
18033          try {
18034            boolean done = false;
18035            while (!done) {
18036              int tag = input.readTag();
18037              switch (tag) {
18038                case 0:
18039                  done = true;
18040                  break;
18041                default: {
18042                  if (!parseUnknownField(input, unknownFields,
18043                                         extensionRegistry, tag)) {
18044                    done = true;
18045                  }
18046                  break;
18047                }
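            // tag 10 = (field 1 << 3) | wire type 2 (length-delimited): jid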
18048                case 10: {
18049                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder subBuilder = null;
18050                  if (((bitField0_ & 0x00000001) == 0x00000001)) {
18051                    subBuilder = jid_.toBuilder();
18052                  }
18053                  jid_ = input.readMessage(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.PARSER, extensionRegistry);
18054                  if (subBuilder != null) {
18055                    subBuilder.mergeFrom(jid_);
18056                    jid_ = subBuilder.buildPartial();
18057                  }
18058                  bitField0_ |= 0x00000001;
18059                  break;
18060                }
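            // tag 18 = (field 2 << 3) | wire type 2 (length-delimited): nsInfo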
18061                case 18: {
18062                  org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.Builder subBuilder = null;
18063                  if (((bitField0_ & 0x00000002) == 0x00000002)) {
18064                    subBuilder = nsInfo_.toBuilder();
18065                  }
18066                  nsInfo_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.PARSER, extensionRegistry);
18067                  if (subBuilder != null) {
18068                    subBuilder.mergeFrom(nsInfo_);
18069                    nsInfo_ = subBuilder.buildPartial();
18070                  }
18071                  bitField0_ |= 0x00000002;
18072                  break;
18073                }
18074              }
18075            }
18076          } catch (com.google.protobuf.InvalidProtocolBufferException e) {
18077            throw e.setUnfinishedMessage(this);
18078          } catch (java.io.IOException e) {
18079            throw new com.google.protobuf.InvalidProtocolBufferException(
18080                e.getMessage()).setUnfinishedMessage(this);
18081          } finally {
18082            this.unknownFields = unknownFields.build();
18083            makeExtensionsImmutable();
18084          }
18085        }
18086        public static final com.google.protobuf.Descriptors.Descriptor
18087            getDescriptor() {
18088          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_FormatRequestProto_descriptor;
18089        }
18090    
18091        protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
18092            internalGetFieldAccessorTable() {
18093          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_FormatRequestProto_fieldAccessorTable
18094              .ensureFieldAccessorsInitialized(
18095                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto.Builder.class);
18096        }
18097    
18098        public static com.google.protobuf.Parser<FormatRequestProto> PARSER =
18099            new com.google.protobuf.AbstractParser<FormatRequestProto>() {
18100          public FormatRequestProto parsePartialFrom(
18101              com.google.protobuf.CodedInputStream input,
18102              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
18103              throws com.google.protobuf.InvalidProtocolBufferException {
18104            return new FormatRequestProto(input, extensionRegistry);
18105          }
18106        };
18107    
18108        @java.lang.Override
18109        public com.google.protobuf.Parser<FormatRequestProto> getParserForType() {
18110          return PARSER;
18111        }
18112    
18113        private int bitField0_;
18114        // required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;
18115        public static final int JID_FIELD_NUMBER = 1;
18116        private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto jid_;
18117        /**
18118         * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
18119         */
18120        public boolean hasJid() {
18121          return ((bitField0_ & 0x00000001) == 0x00000001);
18122        }
18123        /**
18124         * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
18125         */
18126        public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid() {
18127          return jid_;
18128        }
18129        /**
18130         * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
18131         */
18132        public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder() {
18133          return jid_;
18134        }
18135    
18136        // required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;
18137        public static final int NSINFO_FIELD_NUMBER = 2;
18138        private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto nsInfo_;
18139        /**
18140         * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
18141         */
18142        public boolean hasNsInfo() {
18143          return ((bitField0_ & 0x00000002) == 0x00000002);
18144        }
18145        /**
18146         * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
18147         */
18148        public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto getNsInfo() {
18149          return nsInfo_;
18150        }
18151        /**
18152         * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
18153         */
18154        public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProtoOrBuilder getNsInfoOrBuilder() {
18155          return nsInfo_;
18156        }
18157    
18158        private void initFields() {
18159          jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
18160          nsInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.getDefaultInstance();
18161        }
18162        private byte memoizedIsInitialized = -1;
18163        public final boolean isInitialized() {
18164          byte isInitialized = memoizedIsInitialized;
18165          if (isInitialized != -1) return isInitialized == 1;
18166    
18167          if (!hasJid()) {
18168            memoizedIsInitialized = 0;
18169            return false;
18170          }
18171          if (!hasNsInfo()) {
18172            memoizedIsInitialized = 0;
18173            return false;
18174          }
18175          if (!getJid().isInitialized()) {
18176            memoizedIsInitialized = 0;
18177            return false;
18178          }
18179          if (!getNsInfo().isInitialized()) {
18180            memoizedIsInitialized = 0;
18181            return false;
18182          }
18183          memoizedIsInitialized = 1;
18184          return true;
18185        }
18186    
18187        public void writeTo(com.google.protobuf.CodedOutputStream output)
18188                            throws java.io.IOException {
18189          getSerializedSize();
18190          if (((bitField0_ & 0x00000001) == 0x00000001)) {
18191            output.writeMessage(1, jid_);
18192          }
18193          if (((bitField0_ & 0x00000002) == 0x00000002)) {
18194            output.writeMessage(2, nsInfo_);
18195          }
18196          getUnknownFields().writeTo(output);
18197        }
18198    
18199        private int memoizedSerializedSize = -1;
18200        public int getSerializedSize() {
18201          int size = memoizedSerializedSize;
18202          if (size != -1) return size;
18203    
18204          size = 0;
18205          if (((bitField0_ & 0x00000001) == 0x00000001)) {
18206            size += com.google.protobuf.CodedOutputStream
18207              .computeMessageSize(1, jid_);
18208          }
18209          if (((bitField0_ & 0x00000002) == 0x00000002)) {
18210            size += com.google.protobuf.CodedOutputStream
18211              .computeMessageSize(2, nsInfo_);
18212          }
18213          size += getUnknownFields().getSerializedSize();
18214          memoizedSerializedSize = size;
18215          return size;
18216        }
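    // Note (editorial, not part of the generated output): writeTo() calls
    // getSerializedSize() up front so that nested message sizes are memoized
    // before the length-prefixed fields are written.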
18217    
18218        private static final long serialVersionUID = 0L;
18219        @java.lang.Override
18220        protected java.lang.Object writeReplace()
18221            throws java.io.ObjectStreamException {
18222          return super.writeReplace();
18223        }
18224    
18225        @java.lang.Override
18226        public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
18230          if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto)) {
18231            return super.equals(obj);
18232          }
18233          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto) obj;
18234    
18235          boolean result = true;
18236          result = result && (hasJid() == other.hasJid());
18237          if (hasJid()) {
18238            result = result && getJid()
18239                .equals(other.getJid());
18240          }
18241          result = result && (hasNsInfo() == other.hasNsInfo());
18242          if (hasNsInfo()) {
18243            result = result && getNsInfo()
18244                .equals(other.getNsInfo());
18245          }
18246          result = result &&
18247              getUnknownFields().equals(other.getUnknownFields());
18248          return result;
18249        }
18250    
18251        private int memoizedHashCode = 0;
18252        @java.lang.Override
18253        public int hashCode() {
18254          if (memoizedHashCode != 0) {
18255            return memoizedHashCode;
18256          }
18257          int hash = 41;
18258          hash = (19 * hash) + getDescriptorForType().hashCode();
18259          if (hasJid()) {
18260            hash = (37 * hash) + JID_FIELD_NUMBER;
18261            hash = (53 * hash) + getJid().hashCode();
18262          }
18263          if (hasNsInfo()) {
18264            hash = (37 * hash) + NSINFO_FIELD_NUMBER;
18265            hash = (53 * hash) + getNsInfo().hashCode();
18266          }
18267          hash = (29 * hash) + getUnknownFields().hashCode();
18268          memoizedHashCode = hash;
18269          return hash;
18270        }
18271    
18272        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto parseFrom(
18273            com.google.protobuf.ByteString data)
18274            throws com.google.protobuf.InvalidProtocolBufferException {
18275          return PARSER.parseFrom(data);
18276        }
18277        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto parseFrom(
18278            com.google.protobuf.ByteString data,
18279            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
18280            throws com.google.protobuf.InvalidProtocolBufferException {
18281          return PARSER.parseFrom(data, extensionRegistry);
18282        }
18283        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto parseFrom(byte[] data)
18284            throws com.google.protobuf.InvalidProtocolBufferException {
18285          return PARSER.parseFrom(data);
18286        }
18287        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto parseFrom(
18288            byte[] data,
18289            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
18290            throws com.google.protobuf.InvalidProtocolBufferException {
18291          return PARSER.parseFrom(data, extensionRegistry);
18292        }
18293        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto parseFrom(java.io.InputStream input)
18294            throws java.io.IOException {
18295          return PARSER.parseFrom(input);
18296        }
18297        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto parseFrom(
18298            java.io.InputStream input,
18299            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
18300            throws java.io.IOException {
18301          return PARSER.parseFrom(input, extensionRegistry);
18302        }
18303        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto parseDelimitedFrom(java.io.InputStream input)
18304            throws java.io.IOException {
18305          return PARSER.parseDelimitedFrom(input);
18306        }
18307        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto parseDelimitedFrom(
18308            java.io.InputStream input,
18309            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
18310            throws java.io.IOException {
18311          return PARSER.parseDelimitedFrom(input, extensionRegistry);
18312        }
18313        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto parseFrom(
18314            com.google.protobuf.CodedInputStream input)
18315            throws java.io.IOException {
18316          return PARSER.parseFrom(input);
18317        }
18318        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto parseFrom(
18319            com.google.protobuf.CodedInputStream input,
18320            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
18321            throws java.io.IOException {
18322          return PARSER.parseFrom(input, extensionRegistry);
18323        }
18324    
18325        public static Builder newBuilder() { return Builder.create(); }
18326        public Builder newBuilderForType() { return newBuilder(); }
18327        public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto prototype) {
18328          return newBuilder().mergeFrom(prototype);
18329        }
18330        public Builder toBuilder() { return newBuilder(this); }
18331    
18332        @java.lang.Override
18333        protected Builder newBuilderForType(
18334            com.google.protobuf.GeneratedMessage.BuilderParent parent) {
18335          Builder builder = new Builder(parent);
18336          return builder;
18337        }
18338        /**
18339         * Protobuf type {@code hadoop.hdfs.qjournal.FormatRequestProto}
18340         *
18341         * <pre>
18342         **
18343         * format()
18344         * </pre>
18345         */
18346        public static final class Builder extends
18347            com.google.protobuf.GeneratedMessage.Builder<Builder>
18348           implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProtoOrBuilder {
18349          public static final com.google.protobuf.Descriptors.Descriptor
18350              getDescriptor() {
18351            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_FormatRequestProto_descriptor;
18352          }
18353    
18354          protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
18355              internalGetFieldAccessorTable() {
18356            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_FormatRequestProto_fieldAccessorTable
18357                .ensureFieldAccessorsInitialized(
18358                    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto.Builder.class);
18359          }
18360    
18361          // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto.newBuilder()
18362          private Builder() {
18363            maybeForceBuilderInitialization();
18364          }
18365    
18366          private Builder(
18367              com.google.protobuf.GeneratedMessage.BuilderParent parent) {
18368            super(parent);
18369            maybeForceBuilderInitialization();
18370          }
18371          private void maybeForceBuilderInitialization() {
18372            if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
18373              getJidFieldBuilder();
18374              getNsInfoFieldBuilder();
18375            }
18376          }
18377          private static Builder create() {
18378            return new Builder();
18379          }
18380    
18381          public Builder clear() {
18382            super.clear();
18383            if (jidBuilder_ == null) {
18384              jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
18385            } else {
18386              jidBuilder_.clear();
18387            }
18388            bitField0_ = (bitField0_ & ~0x00000001);
18389            if (nsInfoBuilder_ == null) {
18390              nsInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.getDefaultInstance();
18391            } else {
18392              nsInfoBuilder_.clear();
18393            }
18394            bitField0_ = (bitField0_ & ~0x00000002);
18395            return this;
18396          }
18397    
18398          public Builder clone() {
18399            return create().mergeFrom(buildPartial());
18400          }
18401    
18402          public com.google.protobuf.Descriptors.Descriptor
18403              getDescriptorForType() {
18404            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_FormatRequestProto_descriptor;
18405          }
18406    
18407          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto getDefaultInstanceForType() {
18408            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto.getDefaultInstance();
18409          }
18410    
18411          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto build() {
18412            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto result = buildPartial();
18413            if (!result.isInitialized()) {
18414              throw newUninitializedMessageException(result);
18415            }
18416            return result;
18417          }
18418    
18419          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto buildPartial() {
18420            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto(this);
18421            int from_bitField0_ = bitField0_;
18422            int to_bitField0_ = 0;
18423            if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
18424              to_bitField0_ |= 0x00000001;
18425            }
18426            if (jidBuilder_ == null) {
18427              result.jid_ = jid_;
18428            } else {
18429              result.jid_ = jidBuilder_.build();
18430            }
18431            if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
18432              to_bitField0_ |= 0x00000002;
18433            }
18434            if (nsInfoBuilder_ == null) {
18435              result.nsInfo_ = nsInfo_;
18436            } else {
18437              result.nsInfo_ = nsInfoBuilder_.build();
18438            }
18439            result.bitField0_ = to_bitField0_;
18440            onBuilt();
18441            return result;
18442          }
18443    
18444          public Builder mergeFrom(com.google.protobuf.Message other) {
18445            if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto) {
18446              return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto)other);
18447            } else {
18448              super.mergeFrom(other);
18449              return this;
18450            }
18451          }
18452    
18453          public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto other) {
18454            if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto.getDefaultInstance()) return this;
18455            if (other.hasJid()) {
18456              mergeJid(other.getJid());
18457            }
18458            if (other.hasNsInfo()) {
18459              mergeNsInfo(other.getNsInfo());
18460            }
18461            this.mergeUnknownFields(other.getUnknownFields());
18462            return this;
18463          }
18464    
      public final boolean isInitialized() {
        if (!hasJid()) {
          return false;
        }
        if (!hasNsInfo()) {
          return false;
        }
        if (!getJid().isInitialized()) {
          return false;
        }
        if (!getNsInfo().isInitialized()) {
          return false;
        }
        return true;
      }
18484    
18485          public Builder mergeFrom(
18486              com.google.protobuf.CodedInputStream input,
18487              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
18488              throws java.io.IOException {
18489            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto parsedMessage = null;
18490            try {
18491              parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
18492            } catch (com.google.protobuf.InvalidProtocolBufferException e) {
18493              parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto) e.getUnfinishedMessage();
18494              throw e;
18495            } finally {
18496              if (parsedMessage != null) {
18497                mergeFrom(parsedMessage);
18498              }
18499            }
18500            return this;
18501          }
18502          private int bitField0_;
18503    
18504          // required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;
18505          private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
18506          private com.google.protobuf.SingleFieldBuilder<
18507              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder> jidBuilder_;
18508          /**
18509           * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
18510           */
18511          public boolean hasJid() {
18512            return ((bitField0_ & 0x00000001) == 0x00000001);
18513          }
18514          /**
18515           * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
18516           */
18517          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid() {
18518            if (jidBuilder_ == null) {
18519              return jid_;
18520            } else {
18521              return jidBuilder_.getMessage();
18522            }
18523          }
18524          /**
18525           * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
18526           */
18527          public Builder setJid(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto value) {
18528            if (jidBuilder_ == null) {
18529              if (value == null) {
18530                throw new NullPointerException();
18531              }
18532              jid_ = value;
18533              onChanged();
18534            } else {
18535              jidBuilder_.setMessage(value);
18536            }
18537            bitField0_ |= 0x00000001;
18538            return this;
18539          }
18540          /**
18541           * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
18542           */
18543          public Builder setJid(
18544              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder builderForValue) {
18545            if (jidBuilder_ == null) {
18546              jid_ = builderForValue.build();
18547              onChanged();
18548            } else {
18549              jidBuilder_.setMessage(builderForValue.build());
18550            }
18551            bitField0_ |= 0x00000001;
18552            return this;
18553          }
18554          /**
18555           * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
18556           */
18557          public Builder mergeJid(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto value) {
18558            if (jidBuilder_ == null) {
18559              if (((bitField0_ & 0x00000001) == 0x00000001) &&
18560                  jid_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance()) {
18561                jid_ =
18562                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.newBuilder(jid_).mergeFrom(value).buildPartial();
18563              } else {
18564                jid_ = value;
18565              }
18566              onChanged();
18567            } else {
18568              jidBuilder_.mergeFrom(value);
18569            }
18570            bitField0_ |= 0x00000001;
18571            return this;
18572          }
18573          /**
18574           * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
18575           */
18576          public Builder clearJid() {
18577            if (jidBuilder_ == null) {
18578              jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
18579              onChanged();
18580            } else {
18581              jidBuilder_.clear();
18582            }
18583            bitField0_ = (bitField0_ & ~0x00000001);
18584            return this;
18585          }
18586          /**
18587           * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
18588           */
18589          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder getJidBuilder() {
18590            bitField0_ |= 0x00000001;
18591            onChanged();
18592            return getJidFieldBuilder().getBuilder();
18593          }
18594          /**
18595           * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
18596           */
18597          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder() {
18598            if (jidBuilder_ != null) {
18599              return jidBuilder_.getMessageOrBuilder();
18600            } else {
18601              return jid_;
18602            }
18603          }
18604          /**
18605           * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
18606           */
18607          private com.google.protobuf.SingleFieldBuilder<
18608              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder> 
18609              getJidFieldBuilder() {
18610            if (jidBuilder_ == null) {
18611              jidBuilder_ = new com.google.protobuf.SingleFieldBuilder<
18612                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder>(
18613                      jid_,
18614                      getParentForChildren(),
18615                      isClean());
18616              jid_ = null;
18617            }
18618            return jidBuilder_;
18619          }
18620    
18621          // required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;
18622          private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto nsInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.getDefaultInstance();
18623          private com.google.protobuf.SingleFieldBuilder<
18624              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProtoOrBuilder> nsInfoBuilder_;
18625          /**
18626           * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
18627           */
18628          public boolean hasNsInfo() {
18629            return ((bitField0_ & 0x00000002) == 0x00000002);
18630          }
18631          /**
18632           * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
18633           */
18634          public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto getNsInfo() {
18635            if (nsInfoBuilder_ == null) {
18636              return nsInfo_;
18637            } else {
18638              return nsInfoBuilder_.getMessage();
18639            }
18640          }
18641          /**
18642           * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
18643           */
18644          public Builder setNsInfo(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto value) {
18645            if (nsInfoBuilder_ == null) {
18646              if (value == null) {
18647                throw new NullPointerException();
18648              }
18649              nsInfo_ = value;
18650              onChanged();
18651            } else {
18652              nsInfoBuilder_.setMessage(value);
18653            }
18654            bitField0_ |= 0x00000002;
18655            return this;
18656          }
18657          /**
18658           * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
18659           */
18660          public Builder setNsInfo(
18661              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.Builder builderForValue) {
18662            if (nsInfoBuilder_ == null) {
18663              nsInfo_ = builderForValue.build();
18664              onChanged();
18665            } else {
18666              nsInfoBuilder_.setMessage(builderForValue.build());
18667            }
18668            bitField0_ |= 0x00000002;
18669            return this;
18670          }
18671          /**
18672           * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
18673           */
18674          public Builder mergeNsInfo(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto value) {
18675            if (nsInfoBuilder_ == null) {
18676              if (((bitField0_ & 0x00000002) == 0x00000002) &&
18677                  nsInfo_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.getDefaultInstance()) {
18678                nsInfo_ =
18679                  org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.newBuilder(nsInfo_).mergeFrom(value).buildPartial();
18680              } else {
18681                nsInfo_ = value;
18682              }
18683              onChanged();
18684            } else {
18685              nsInfoBuilder_.mergeFrom(value);
18686            }
18687            bitField0_ |= 0x00000002;
18688            return this;
18689          }
18690          /**
18691           * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
18692           */
18693          public Builder clearNsInfo() {
18694            if (nsInfoBuilder_ == null) {
18695              nsInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.getDefaultInstance();
18696              onChanged();
18697            } else {
18698              nsInfoBuilder_.clear();
18699            }
18700            bitField0_ = (bitField0_ & ~0x00000002);
18701            return this;
18702          }
18703          /**
18704           * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
18705           */
18706          public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.Builder getNsInfoBuilder() {
18707            bitField0_ |= 0x00000002;
18708            onChanged();
18709            return getNsInfoFieldBuilder().getBuilder();
18710          }
18711          /**
18712           * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
18713           */
18714          public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProtoOrBuilder getNsInfoOrBuilder() {
18715            if (nsInfoBuilder_ != null) {
18716              return nsInfoBuilder_.getMessageOrBuilder();
18717            } else {
18718              return nsInfo_;
18719            }
18720          }
18721          /**
18722           * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
18723           */
18724          private com.google.protobuf.SingleFieldBuilder<
18725              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProtoOrBuilder> 
18726              getNsInfoFieldBuilder() {
18727            if (nsInfoBuilder_ == null) {
18728              nsInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder<
18729                  org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProtoOrBuilder>(
18730                      nsInfo_,
18731                      getParentForChildren(),
18732                      isClean());
18733              nsInfo_ = null;
18734            }
18735            return nsInfoBuilder_;
18736          }
18737    
18738          // @@protoc_insertion_point(builder_scope:hadoop.hdfs.qjournal.FormatRequestProto)
18739        }
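    // Usage sketch (editorial, not part of the generated output): nested
    // message fields accept either a built message or its Builder.
    // JournalIdProto.Builder.setIdentifier and the NamespaceInfoProto value
    // come from elsewhere in the generated sources and are assumptions here;
    // the values are hypothetical.
    //
    //   FormatRequestProto req = FormatRequestProto.newBuilder()
    //       .setJid(JournalIdProto.newBuilder()
    //           .setIdentifier("journal-1"))  // Builder overload of setJid
    //       .setNsInfo(nsInfo)  // a previously built NamespaceInfoProto
    //       .build();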
18740    
18741        static {
18742          defaultInstance = new FormatRequestProto(true);
18743          defaultInstance.initFields();
18744        }
18745    
18746        // @@protoc_insertion_point(class_scope:hadoop.hdfs.qjournal.FormatRequestProto)
18747      }
18748    
18749      public interface FormatResponseProtoOrBuilder
18750          extends com.google.protobuf.MessageOrBuilder {
18751      }
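  // Note (editorial, not part of the generated output): FormatResponseProto
  // declares no fields; it serves as a void-style acknowledgement for the
  // format() call.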
18752      /**
18753       * Protobuf type {@code hadoop.hdfs.qjournal.FormatResponseProto}
18754       */
18755      public static final class FormatResponseProto extends
18756          com.google.protobuf.GeneratedMessage
18757          implements FormatResponseProtoOrBuilder {
18758        // Use FormatResponseProto.newBuilder() to construct.
18759        private FormatResponseProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
18760          super(builder);
18761          this.unknownFields = builder.getUnknownFields();
18762        }
18763        private FormatResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
18764    
18765        private static final FormatResponseProto defaultInstance;
18766        public static FormatResponseProto getDefaultInstance() {
18767          return defaultInstance;
18768        }
18769    
18770        public FormatResponseProto getDefaultInstanceForType() {
18771          return defaultInstance;
18772        }
18773    
18774        private final com.google.protobuf.UnknownFieldSet unknownFields;
18775        @java.lang.Override
18776        public final com.google.protobuf.UnknownFieldSet
18777            getUnknownFields() {
18778          return this.unknownFields;
18779        }
18780        private FormatResponseProto(
18781            com.google.protobuf.CodedInputStream input,
18782            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
18783            throws com.google.protobuf.InvalidProtocolBufferException {
18784          initFields();
18785          com.google.protobuf.UnknownFieldSet.Builder unknownFields =
18786              com.google.protobuf.UnknownFieldSet.newBuilder();
18787          try {
18788            boolean done = false;
18789            while (!done) {
18790              int tag = input.readTag();
18791              switch (tag) {
18792                case 0:
18793                  done = true;
18794                  break;
18795                default: {
18796                  if (!parseUnknownField(input, unknownFields,
18797                                         extensionRegistry, tag)) {
18798                    done = true;
18799                  }
18800                  break;
18801                }
18802              }
18803            }
18804          } catch (com.google.protobuf.InvalidProtocolBufferException e) {
18805            throw e.setUnfinishedMessage(this);
18806          } catch (java.io.IOException e) {
18807            throw new com.google.protobuf.InvalidProtocolBufferException(
18808                e.getMessage()).setUnfinishedMessage(this);
18809          } finally {
18810            this.unknownFields = unknownFields.build();
18811            makeExtensionsImmutable();
18812          }
18813        }
18814        public static final com.google.protobuf.Descriptors.Descriptor
18815            getDescriptor() {
18816          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_FormatResponseProto_descriptor;
18817        }
18818    
18819        protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
18820            internalGetFieldAccessorTable() {
18821          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_FormatResponseProto_fieldAccessorTable
18822              .ensureFieldAccessorsInitialized(
18823                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto.Builder.class);
18824        }
18825    
18826        public static com.google.protobuf.Parser<FormatResponseProto> PARSER =
18827            new com.google.protobuf.AbstractParser<FormatResponseProto>() {
18828          public FormatResponseProto parsePartialFrom(
18829              com.google.protobuf.CodedInputStream input,
18830              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
18831              throws com.google.protobuf.InvalidProtocolBufferException {
18832            return new FormatResponseProto(input, extensionRegistry);
18833          }
18834        };
18835    
18836        @java.lang.Override
18837        public com.google.protobuf.Parser<FormatResponseProto> getParserForType() {
18838          return PARSER;
18839        }
18840    
18841        private void initFields() {
18842        }
18843        private byte memoizedIsInitialized = -1;
18844        public final boolean isInitialized() {
18845          byte isInitialized = memoizedIsInitialized;
18846          if (isInitialized != -1) return isInitialized == 1;
18847    
18848          memoizedIsInitialized = 1;
18849          return true;
18850        }
18851    
18852        public void writeTo(com.google.protobuf.CodedOutputStream output)
18853                            throws java.io.IOException {
18854          getSerializedSize();
18855          getUnknownFields().writeTo(output);
18856        }
18857    
18858        private int memoizedSerializedSize = -1;
18859        public int getSerializedSize() {
18860          int size = memoizedSerializedSize;
18861          if (size != -1) return size;
18862    
18863          size = 0;
18864          size += getUnknownFields().getSerializedSize();
18865          memoizedSerializedSize = size;
18866          return size;
18867        }
18868    
18869        private static final long serialVersionUID = 0L;
18870        @java.lang.Override
18871        protected java.lang.Object writeReplace()
18872            throws java.io.ObjectStreamException {
18873          return super.writeReplace();
18874        }
18875    
18876        @java.lang.Override
18877        public boolean equals(final java.lang.Object obj) {
18878          if (obj == this) {
18879           return true;
18880          }
18881          if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto)) {
18882            return super.equals(obj);
18883          }
18884          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto) obj;
18885    
18886          boolean result = true;
18887          result = result &&
18888              getUnknownFields().equals(other.getUnknownFields());
18889          return result;
18890        }
18891    
18892        private int memoizedHashCode = 0;
18893        @java.lang.Override
18894        public int hashCode() {
18895          if (memoizedHashCode != 0) {
18896            return memoizedHashCode;
18897          }
18898          int hash = 41;
18899          hash = (19 * hash) + getDescriptorForType().hashCode();
18900          hash = (29 * hash) + getUnknownFields().hashCode();
18901          memoizedHashCode = hash;
18902          return hash;
18903        }
18904    
18905        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto parseFrom(
18906            com.google.protobuf.ByteString data)
18907            throws com.google.protobuf.InvalidProtocolBufferException {
18908          return PARSER.parseFrom(data);
18909        }
18910        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto parseFrom(
18911            com.google.protobuf.ByteString data,
18912            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
18913            throws com.google.protobuf.InvalidProtocolBufferException {
18914          return PARSER.parseFrom(data, extensionRegistry);
18915        }
18916        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto parseFrom(byte[] data)
18917            throws com.google.protobuf.InvalidProtocolBufferException {
18918          return PARSER.parseFrom(data);
18919        }
18920        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto parseFrom(
18921            byte[] data,
18922            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
18923            throws com.google.protobuf.InvalidProtocolBufferException {
18924          return PARSER.parseFrom(data, extensionRegistry);
18925        }
18926        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto parseFrom(java.io.InputStream input)
18927            throws java.io.IOException {
18928          return PARSER.parseFrom(input);
18929        }
18930        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto parseFrom(
18931            java.io.InputStream input,
18932            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
18933            throws java.io.IOException {
18934          return PARSER.parseFrom(input, extensionRegistry);
18935        }
18936        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto parseDelimitedFrom(java.io.InputStream input)
18937            throws java.io.IOException {
18938          return PARSER.parseDelimitedFrom(input);
18939        }
18940        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto parseDelimitedFrom(
18941            java.io.InputStream input,
18942            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
18943            throws java.io.IOException {
18944          return PARSER.parseDelimitedFrom(input, extensionRegistry);
18945        }
18946        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto parseFrom(
18947            com.google.protobuf.CodedInputStream input)
18948            throws java.io.IOException {
18949          return PARSER.parseFrom(input);
18950        }
18951        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto parseFrom(
18952            com.google.protobuf.CodedInputStream input,
18953            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
18954            throws java.io.IOException {
18955          return PARSER.parseFrom(input, extensionRegistry);
18956        }
18957    
18958        public static Builder newBuilder() { return Builder.create(); }
18959        public Builder newBuilderForType() { return newBuilder(); }
18960        public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto prototype) {
18961          return newBuilder().mergeFrom(prototype);
18962        }
18963        public Builder toBuilder() { return newBuilder(this); }
18964    
18965        @java.lang.Override
18966        protected Builder newBuilderForType(
18967            com.google.protobuf.GeneratedMessage.BuilderParent parent) {
18968          Builder builder = new Builder(parent);
18969          return builder;
18970        }
18971        /**
18972         * Protobuf type {@code hadoop.hdfs.qjournal.FormatResponseProto}
18973         */
18974        public static final class Builder extends
18975            com.google.protobuf.GeneratedMessage.Builder<Builder>
18976           implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProtoOrBuilder {
18977          public static final com.google.protobuf.Descriptors.Descriptor
18978              getDescriptor() {
18979            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_FormatResponseProto_descriptor;
18980          }
18981    
18982          protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
18983              internalGetFieldAccessorTable() {
18984            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_FormatResponseProto_fieldAccessorTable
18985                .ensureFieldAccessorsInitialized(
18986                    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto.Builder.class);
18987          }
18988    
18989          // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto.newBuilder()
18990          private Builder() {
18991            maybeForceBuilderInitialization();
18992          }
18993    
18994          private Builder(
18995              com.google.protobuf.GeneratedMessage.BuilderParent parent) {
18996            super(parent);
18997            maybeForceBuilderInitialization();
18998          }
18999          private void maybeForceBuilderInitialization() {
19000            if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
19001            }
19002          }
19003          private static Builder create() {
19004            return new Builder();
19005          }
19006    
19007          public Builder clear() {
19008            super.clear();
19009            return this;
19010          }
19011    
19012          public Builder clone() {
19013            return create().mergeFrom(buildPartial());
19014          }
19015    
19016          public com.google.protobuf.Descriptors.Descriptor
19017              getDescriptorForType() {
19018            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_FormatResponseProto_descriptor;
19019          }
19020    
19021          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto getDefaultInstanceForType() {
19022            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto.getDefaultInstance();
19023          }
19024    
19025          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto build() {
19026            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto result = buildPartial();
19027            if (!result.isInitialized()) {
19028              throw newUninitializedMessageException(result);
19029            }
19030            return result;
19031          }
19032    
19033          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto buildPartial() {
19034            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto(this);
19035            onBuilt();
19036            return result;
19037          }
19038    
19039          public Builder mergeFrom(com.google.protobuf.Message other) {
19040            if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto) {
19041              return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto)other);
19042            } else {
19043              super.mergeFrom(other);
19044              return this;
19045            }
19046          }
19047    
19048          public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto other) {
19049            if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto.getDefaultInstance()) return this;
19050            this.mergeUnknownFields(other.getUnknownFields());
19051            return this;
19052          }
19053    
19054          public final boolean isInitialized() {
19055            return true;
19056          }
19057    
19058          public Builder mergeFrom(
19059              com.google.protobuf.CodedInputStream input,
19060              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
19061              throws java.io.IOException {
19062            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto parsedMessage = null;
19063            try {
19064              parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
19065            } catch (com.google.protobuf.InvalidProtocolBufferException e) {
19066              parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto) e.getUnfinishedMessage();
19067              throw e;
19068            } finally {
19069              if (parsedMessage != null) {
19070                mergeFrom(parsedMessage);
19071              }
19072            }
19073            return this;
19074          }
19075    
19076          // @@protoc_insertion_point(builder_scope:hadoop.hdfs.qjournal.FormatResponseProto)
19077        }
19078    
19079        static {
19080          defaultInstance = new FormatResponseProto(true);
19081          defaultInstance.initFields();
19082        }
19083    
19084        // @@protoc_insertion_point(class_scope:hadoop.hdfs.qjournal.FormatResponseProto)
19085      }
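
  /**
   * Illustrative sketch, not part of the protoc output: FormatResponseProto
   * declares no fields, so a default instance serializes to zero bytes (unless
   * unknown fields are carried), and a round trip through the generated parser
   * is still well defined.
   */
  public static FormatResponseProto exampleFormatResponseRoundTrip()
      throws com.google.protobuf.InvalidProtocolBufferException {
    byte[] wire = FormatResponseProto.getDefaultInstance().toByteArray();  // empty array
    return FormatResponseProto.parseFrom(wire);  // delegates to PARSER.parseFrom
  }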
19086    
19087      public interface NewEpochRequestProtoOrBuilder
19088          extends com.google.protobuf.MessageOrBuilder {
19089    
19090        // required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;
19091        /**
19092         * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
19093         */
19094        boolean hasJid();
19095        /**
19096         * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
19097         */
19098        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid();
19099        /**
19100         * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
19101         */
19102        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder();
19103    
19104        // required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;
19105        /**
19106         * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
19107         */
19108        boolean hasNsInfo();
19109        /**
19110         * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
19111         */
19112        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto getNsInfo();
19113        /**
19114         * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
19115         */
19116        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProtoOrBuilder getNsInfoOrBuilder();
19117    
19118        // required uint64 epoch = 3;
19119        /**
19120         * <code>required uint64 epoch = 3;</code>
19121         */
19122        boolean hasEpoch();
19123        /**
19124         * <code>required uint64 epoch = 3;</code>
19125         */
19126        long getEpoch();
19127      }
19128      /**
19129       * Protobuf type {@code hadoop.hdfs.qjournal.NewEpochRequestProto}
19130       *
19131       * <pre>
19132       **
19133       * newEpoch()
19134       * </pre>
19135       */
19136      public static final class NewEpochRequestProto extends
19137          com.google.protobuf.GeneratedMessage
19138          implements NewEpochRequestProtoOrBuilder {
19139        // Use NewEpochRequestProto.newBuilder() to construct.
19140        private NewEpochRequestProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
19141          super(builder);
19142          this.unknownFields = builder.getUnknownFields();
19143        }
19144        private NewEpochRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
19145    
19146        private static final NewEpochRequestProto defaultInstance;
19147        public static NewEpochRequestProto getDefaultInstance() {
19148          return defaultInstance;
19149        }
19150    
19151        public NewEpochRequestProto getDefaultInstanceForType() {
19152          return defaultInstance;
19153        }
19154    
19155        private final com.google.protobuf.UnknownFieldSet unknownFields;
19156        @java.lang.Override
19157        public final com.google.protobuf.UnknownFieldSet
19158            getUnknownFields() {
19159          return this.unknownFields;
19160        }
19161        private NewEpochRequestProto(
19162            com.google.protobuf.CodedInputStream input,
19163            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
19164            throws com.google.protobuf.InvalidProtocolBufferException {
19165          initFields();
19166          int mutable_bitField0_ = 0;
19167          com.google.protobuf.UnknownFieldSet.Builder unknownFields =
19168              com.google.protobuf.UnknownFieldSet.newBuilder();
19169          try {
19170            boolean done = false;
19171            while (!done) {
19172              int tag = input.readTag();
19173              switch (tag) {
19174                case 0:
19175                  done = true;
19176                  break;
19177                default: {
19178                  if (!parseUnknownField(input, unknownFields,
19179                                         extensionRegistry, tag)) {
19180                    done = true;
19181                  }
19182                  break;
19183                }
19184                case 10: {
19185                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder subBuilder = null;
19186                  if (((bitField0_ & 0x00000001) == 0x00000001)) {
19187                    subBuilder = jid_.toBuilder();
19188                  }
19189                  jid_ = input.readMessage(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.PARSER, extensionRegistry);
19190                  if (subBuilder != null) {
19191                    subBuilder.mergeFrom(jid_);
19192                    jid_ = subBuilder.buildPartial();
19193                  }
19194                  bitField0_ |= 0x00000001;
19195                  break;
19196                }
19197                case 18: {
19198                  org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.Builder subBuilder = null;
19199                  if (((bitField0_ & 0x00000002) == 0x00000002)) {
19200                    subBuilder = nsInfo_.toBuilder();
19201                  }
19202                  nsInfo_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.PARSER, extensionRegistry);
19203                  if (subBuilder != null) {
19204                    subBuilder.mergeFrom(nsInfo_);
19205                    nsInfo_ = subBuilder.buildPartial();
19206                  }
19207                  bitField0_ |= 0x00000002;
19208                  break;
19209                }
19210                case 24: {
19211                  bitField0_ |= 0x00000004;
19212                  epoch_ = input.readUInt64();
19213                  break;
19214                }
19215              }
19216            }
19217          } catch (com.google.protobuf.InvalidProtocolBufferException e) {
19218            throw e.setUnfinishedMessage(this);
19219          } catch (java.io.IOException e) {
19220            throw new com.google.protobuf.InvalidProtocolBufferException(
19221                e.getMessage()).setUnfinishedMessage(this);
19222          } finally {
19223            this.unknownFields = unknownFields.build();
19224            makeExtensionsImmutable();
19225          }
19226        }
19227        public static final com.google.protobuf.Descriptors.Descriptor
19228            getDescriptor() {
19229          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_NewEpochRequestProto_descriptor;
19230        }
19231    
19232        protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
19233            internalGetFieldAccessorTable() {
19234          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_NewEpochRequestProto_fieldAccessorTable
19235              .ensureFieldAccessorsInitialized(
19236                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto.Builder.class);
19237        }
19238    
19239        public static com.google.protobuf.Parser<NewEpochRequestProto> PARSER =
19240            new com.google.protobuf.AbstractParser<NewEpochRequestProto>() {
19241          public NewEpochRequestProto parsePartialFrom(
19242              com.google.protobuf.CodedInputStream input,
19243              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
19244              throws com.google.protobuf.InvalidProtocolBufferException {
19245            return new NewEpochRequestProto(input, extensionRegistry);
19246          }
19247        };
19248    
19249        @java.lang.Override
19250        public com.google.protobuf.Parser<NewEpochRequestProto> getParserForType() {
19251          return PARSER;
19252        }
19253    
19254        private int bitField0_;
19255        // required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;
19256        public static final int JID_FIELD_NUMBER = 1;
19257        private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto jid_;
19258        /**
19259         * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
19260         */
19261        public boolean hasJid() {
19262          return ((bitField0_ & 0x00000001) == 0x00000001);
19263        }
19264        /**
19265         * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
19266         */
19267        public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid() {
19268          return jid_;
19269        }
19270        /**
19271         * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
19272         */
19273        public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder() {
19274          return jid_;
19275        }
19276    
19277        // required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;
19278        public static final int NSINFO_FIELD_NUMBER = 2;
19279        private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto nsInfo_;
19280        /**
19281         * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
19282         */
19283        public boolean hasNsInfo() {
19284          return ((bitField0_ & 0x00000002) == 0x00000002);
19285        }
19286        /**
19287         * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
19288         */
19289        public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto getNsInfo() {
19290          return nsInfo_;
19291        }
19292        /**
19293         * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
19294         */
19295        public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProtoOrBuilder getNsInfoOrBuilder() {
19296          return nsInfo_;
19297        }
19298    
19299        // required uint64 epoch = 3;
19300        public static final int EPOCH_FIELD_NUMBER = 3;
19301        private long epoch_;
19302        /**
19303         * <code>required uint64 epoch = 3;</code>
19304         */
19305        public boolean hasEpoch() {
19306          return ((bitField0_ & 0x00000004) == 0x00000004);
19307        }
19308        /**
19309         * <code>required uint64 epoch = 3;</code>
19310         */
19311        public long getEpoch() {
19312          return epoch_;
19313        }
19314    
19315        private void initFields() {
19316          jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
19317          nsInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.getDefaultInstance();
19318          epoch_ = 0L;
19319        }
19320        private byte memoizedIsInitialized = -1;
19321        public final boolean isInitialized() {
19322          byte isInitialized = memoizedIsInitialized;
19323          if (isInitialized != -1) return isInitialized == 1;
19324    
19325          if (!hasJid()) {
19326            memoizedIsInitialized = 0;
19327            return false;
19328          }
19329          if (!hasNsInfo()) {
19330            memoizedIsInitialized = 0;
19331            return false;
19332          }
19333          if (!hasEpoch()) {
19334            memoizedIsInitialized = 0;
19335            return false;
19336          }
19337          if (!getJid().isInitialized()) {
19338            memoizedIsInitialized = 0;
19339            return false;
19340          }
19341          if (!getNsInfo().isInitialized()) {
19342            memoizedIsInitialized = 0;
19343            return false;
19344          }
19345          memoizedIsInitialized = 1;
19346          return true;
19347        }
19348    
19349        public void writeTo(com.google.protobuf.CodedOutputStream output)
19350                            throws java.io.IOException {
19351          getSerializedSize();
19352          if (((bitField0_ & 0x00000001) == 0x00000001)) {
19353            output.writeMessage(1, jid_);
19354          }
19355          if (((bitField0_ & 0x00000002) == 0x00000002)) {
19356            output.writeMessage(2, nsInfo_);
19357          }
19358          if (((bitField0_ & 0x00000004) == 0x00000004)) {
19359            output.writeUInt64(3, epoch_);
19360          }
19361          getUnknownFields().writeTo(output);
19362        }
19363    
19364        private int memoizedSerializedSize = -1;
19365        public int getSerializedSize() {
19366          int size = memoizedSerializedSize;
19367          if (size != -1) return size;
19368    
19369          size = 0;
19370          if (((bitField0_ & 0x00000001) == 0x00000001)) {
19371            size += com.google.protobuf.CodedOutputStream
19372              .computeMessageSize(1, jid_);
19373          }
19374          if (((bitField0_ & 0x00000002) == 0x00000002)) {
19375            size += com.google.protobuf.CodedOutputStream
19376              .computeMessageSize(2, nsInfo_);
19377          }
19378          if (((bitField0_ & 0x00000004) == 0x00000004)) {
19379            size += com.google.protobuf.CodedOutputStream
19380              .computeUInt64Size(3, epoch_);
19381          }
19382          size += getUnknownFields().getSerializedSize();
19383          memoizedSerializedSize = size;
19384          return size;
19385        }
19386    
19387        private static final long serialVersionUID = 0L;
19388        @java.lang.Override
19389        protected java.lang.Object writeReplace()
19390            throws java.io.ObjectStreamException {
19391          return super.writeReplace();
19392        }
19393    
19394        @java.lang.Override
19395        public boolean equals(final java.lang.Object obj) {
19396          if (obj == this) {
19397           return true;
19398          }
19399          if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto)) {
19400            return super.equals(obj);
19401          }
19402          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto) obj;
19403    
19404          boolean result = true;
19405          result = result && (hasJid() == other.hasJid());
19406          if (hasJid()) {
19407            result = result && getJid()
19408                .equals(other.getJid());
19409          }
19410          result = result && (hasNsInfo() == other.hasNsInfo());
19411          if (hasNsInfo()) {
19412            result = result && getNsInfo()
19413                .equals(other.getNsInfo());
19414          }
19415          result = result && (hasEpoch() == other.hasEpoch());
19416          if (hasEpoch()) {
19417            result = result && (getEpoch()
19418                == other.getEpoch());
19419          }
19420          result = result &&
19421              getUnknownFields().equals(other.getUnknownFields());
19422          return result;
19423        }
19424    
19425        private int memoizedHashCode = 0;
19426        @java.lang.Override
19427        public int hashCode() {
19428          if (memoizedHashCode != 0) {
19429            return memoizedHashCode;
19430          }
19431          int hash = 41;
19432          hash = (19 * hash) + getDescriptorForType().hashCode();
19433          if (hasJid()) {
19434            hash = (37 * hash) + JID_FIELD_NUMBER;
19435            hash = (53 * hash) + getJid().hashCode();
19436          }
19437          if (hasNsInfo()) {
19438            hash = (37 * hash) + NSINFO_FIELD_NUMBER;
19439            hash = (53 * hash) + getNsInfo().hashCode();
19440          }
19441          if (hasEpoch()) {
19442            hash = (37 * hash) + EPOCH_FIELD_NUMBER;
19443            hash = (53 * hash) + hashLong(getEpoch());
19444          }
19445          hash = (29 * hash) + getUnknownFields().hashCode();
19446          memoizedHashCode = hash;
19447          return hash;
19448        }
19449    
19450        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto parseFrom(
19451            com.google.protobuf.ByteString data)
19452            throws com.google.protobuf.InvalidProtocolBufferException {
19453          return PARSER.parseFrom(data);
19454        }
19455        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto parseFrom(
19456            com.google.protobuf.ByteString data,
19457            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
19458            throws com.google.protobuf.InvalidProtocolBufferException {
19459          return PARSER.parseFrom(data, extensionRegistry);
19460        }
19461        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto parseFrom(byte[] data)
19462            throws com.google.protobuf.InvalidProtocolBufferException {
19463          return PARSER.parseFrom(data);
19464        }
19465        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto parseFrom(
19466            byte[] data,
19467            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
19468            throws com.google.protobuf.InvalidProtocolBufferException {
19469          return PARSER.parseFrom(data, extensionRegistry);
19470        }
19471        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto parseFrom(java.io.InputStream input)
19472            throws java.io.IOException {
19473          return PARSER.parseFrom(input);
19474        }
19475        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto parseFrom(
19476            java.io.InputStream input,
19477            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
19478            throws java.io.IOException {
19479          return PARSER.parseFrom(input, extensionRegistry);
19480        }
19481        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto parseDelimitedFrom(java.io.InputStream input)
19482            throws java.io.IOException {
19483          return PARSER.parseDelimitedFrom(input);
19484        }
19485        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto parseDelimitedFrom(
19486            java.io.InputStream input,
19487            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
19488            throws java.io.IOException {
19489          return PARSER.parseDelimitedFrom(input, extensionRegistry);
19490        }
19491        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto parseFrom(
19492            com.google.protobuf.CodedInputStream input)
19493            throws java.io.IOException {
19494          return PARSER.parseFrom(input);
19495        }
19496        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto parseFrom(
19497            com.google.protobuf.CodedInputStream input,
19498            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
19499            throws java.io.IOException {
19500          return PARSER.parseFrom(input, extensionRegistry);
19501        }
19502    
19503        public static Builder newBuilder() { return Builder.create(); }
19504        public Builder newBuilderForType() { return newBuilder(); }
19505        public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto prototype) {
19506          return newBuilder().mergeFrom(prototype);
19507        }
19508        public Builder toBuilder() { return newBuilder(this); }
19509    
19510        @java.lang.Override
19511        protected Builder newBuilderForType(
19512            com.google.protobuf.GeneratedMessage.BuilderParent parent) {
19513          Builder builder = new Builder(parent);
19514          return builder;
19515        }
19516        /**
19517         * Protobuf type {@code hadoop.hdfs.qjournal.NewEpochRequestProto}
19518         *
19519         * <pre>
19520         **
19521         * newEpoch()
19522         * </pre>
19523         */
19524        public static final class Builder extends
19525            com.google.protobuf.GeneratedMessage.Builder<Builder>
19526           implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProtoOrBuilder {
19527          public static final com.google.protobuf.Descriptors.Descriptor
19528              getDescriptor() {
19529            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_NewEpochRequestProto_descriptor;
19530          }
19531    
19532          protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
19533              internalGetFieldAccessorTable() {
19534            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_NewEpochRequestProto_fieldAccessorTable
19535                .ensureFieldAccessorsInitialized(
19536                    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto.Builder.class);
19537          }
19538    
19539          // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto.newBuilder()
19540          private Builder() {
19541            maybeForceBuilderInitialization();
19542          }
19543    
19544          private Builder(
19545              com.google.protobuf.GeneratedMessage.BuilderParent parent) {
19546            super(parent);
19547            maybeForceBuilderInitialization();
19548          }
19549          private void maybeForceBuilderInitialization() {
19550            if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
19551              getJidFieldBuilder();
19552              getNsInfoFieldBuilder();
19553            }
19554          }
19555          private static Builder create() {
19556            return new Builder();
19557          }
19558    
19559          public Builder clear() {
19560            super.clear();
19561            if (jidBuilder_ == null) {
19562              jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
19563            } else {
19564              jidBuilder_.clear();
19565            }
19566            bitField0_ = (bitField0_ & ~0x00000001);
19567            if (nsInfoBuilder_ == null) {
19568              nsInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.getDefaultInstance();
19569            } else {
19570              nsInfoBuilder_.clear();
19571            }
19572            bitField0_ = (bitField0_ & ~0x00000002);
19573            epoch_ = 0L;
19574            bitField0_ = (bitField0_ & ~0x00000004);
19575            return this;
19576          }
19577    
19578          public Builder clone() {
19579            return create().mergeFrom(buildPartial());
19580          }
19581    
19582          public com.google.protobuf.Descriptors.Descriptor
19583              getDescriptorForType() {
19584            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_NewEpochRequestProto_descriptor;
19585          }
19586    
19587          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto getDefaultInstanceForType() {
19588            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto.getDefaultInstance();
19589          }
19590    
19591          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto build() {
19592            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto result = buildPartial();
19593            if (!result.isInitialized()) {
19594              throw newUninitializedMessageException(result);
19595            }
19596            return result;
19597          }
19598    
19599          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto buildPartial() {
19600            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto(this);
19601            int from_bitField0_ = bitField0_;
19602            int to_bitField0_ = 0;
19603            if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
19604              to_bitField0_ |= 0x00000001;
19605            }
19606            if (jidBuilder_ == null) {
19607              result.jid_ = jid_;
19608            } else {
19609              result.jid_ = jidBuilder_.build();
19610            }
19611            if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
19612              to_bitField0_ |= 0x00000002;
19613            }
19614            if (nsInfoBuilder_ == null) {
19615              result.nsInfo_ = nsInfo_;
19616            } else {
19617              result.nsInfo_ = nsInfoBuilder_.build();
19618            }
19619            if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
19620              to_bitField0_ |= 0x00000004;
19621            }
19622            result.epoch_ = epoch_;
19623            result.bitField0_ = to_bitField0_;
19624            onBuilt();
19625            return result;
19626          }
19627    
19628          public Builder mergeFrom(com.google.protobuf.Message other) {
19629            if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto) {
19630              return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto)other);
19631            } else {
19632              super.mergeFrom(other);
19633              return this;
19634            }
19635          }
19636    
19637          public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto other) {
19638            if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto.getDefaultInstance()) return this;
19639            if (other.hasJid()) {
19640              mergeJid(other.getJid());
19641            }
19642            if (other.hasNsInfo()) {
19643              mergeNsInfo(other.getNsInfo());
19644            }
19645            if (other.hasEpoch()) {
19646              setEpoch(other.getEpoch());
19647            }
19648            this.mergeUnknownFields(other.getUnknownFields());
19649            return this;
19650          }
19651    
19652          public final boolean isInitialized() {
19653            if (!hasJid()) {
19654              
19655              return false;
19656            }
19657            if (!hasNsInfo()) {
19658              
19659              return false;
19660            }
19661            if (!hasEpoch()) {
19662              
19663              return false;
19664            }
19665            if (!getJid().isInitialized()) {
19666              
19667              return false;
19668            }
19669            if (!getNsInfo().isInitialized()) {
19670              
19671              return false;
19672            }
19673            return true;
19674          }
19675    
19676          public Builder mergeFrom(
19677              com.google.protobuf.CodedInputStream input,
19678              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
19679              throws java.io.IOException {
19680            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto parsedMessage = null;
19681            try {
19682              parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
19683            } catch (com.google.protobuf.InvalidProtocolBufferException e) {
19684              parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto) e.getUnfinishedMessage();
19685              throw e;
19686            } finally {
19687              if (parsedMessage != null) {
19688                mergeFrom(parsedMessage);
19689              }
19690            }
19691            return this;
19692          }
19693          private int bitField0_;
19694    
19695          // required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;
19696          private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
19697          private com.google.protobuf.SingleFieldBuilder<
19698              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder> jidBuilder_;
19699          /**
19700           * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
19701           */
19702          public boolean hasJid() {
19703            return ((bitField0_ & 0x00000001) == 0x00000001);
19704          }
19705          /**
19706           * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
19707           */
19708          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid() {
19709            if (jidBuilder_ == null) {
19710              return jid_;
19711            } else {
19712              return jidBuilder_.getMessage();
19713            }
19714          }
19715          /**
19716           * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
19717           */
19718          public Builder setJid(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto value) {
19719            if (jidBuilder_ == null) {
19720              if (value == null) {
19721                throw new NullPointerException();
19722              }
19723              jid_ = value;
19724              onChanged();
19725            } else {
19726              jidBuilder_.setMessage(value);
19727            }
19728            bitField0_ |= 0x00000001;
19729            return this;
19730          }
19731          /**
19732           * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
19733           */
19734          public Builder setJid(
19735              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder builderForValue) {
19736            if (jidBuilder_ == null) {
19737              jid_ = builderForValue.build();
19738              onChanged();
19739            } else {
19740              jidBuilder_.setMessage(builderForValue.build());
19741            }
19742            bitField0_ |= 0x00000001;
19743            return this;
19744          }
19745          /**
19746           * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
19747           */
19748          public Builder mergeJid(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto value) {
19749            if (jidBuilder_ == null) {
19750              if (((bitField0_ & 0x00000001) == 0x00000001) &&
19751                  jid_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance()) {
19752                jid_ =
19753                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.newBuilder(jid_).mergeFrom(value).buildPartial();
19754              } else {
19755                jid_ = value;
19756              }
19757              onChanged();
19758            } else {
19759              jidBuilder_.mergeFrom(value);
19760            }
19761            bitField0_ |= 0x00000001;
19762            return this;
19763          }
19764          /**
19765           * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
19766           */
19767          public Builder clearJid() {
19768            if (jidBuilder_ == null) {
19769              jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
19770              onChanged();
19771            } else {
19772              jidBuilder_.clear();
19773            }
19774            bitField0_ = (bitField0_ & ~0x00000001);
19775            return this;
19776          }
19777          /**
19778           * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
19779           */
19780          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder getJidBuilder() {
19781            bitField0_ |= 0x00000001;
19782            onChanged();
19783            return getJidFieldBuilder().getBuilder();
19784          }
19785          /**
19786           * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
19787           */
19788          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder() {
19789            if (jidBuilder_ != null) {
19790              return jidBuilder_.getMessageOrBuilder();
19791            } else {
19792              return jid_;
19793            }
19794          }
19795          /**
19796           * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
19797           */
19798          private com.google.protobuf.SingleFieldBuilder<
19799              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder> 
19800              getJidFieldBuilder() {
19801            if (jidBuilder_ == null) {
19802              jidBuilder_ = new com.google.protobuf.SingleFieldBuilder<
19803                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder>(
19804                      jid_,
19805                      getParentForChildren(),
19806                      isClean());
19807              jid_ = null;
19808            }
19809            return jidBuilder_;
19810          }
19811    
19812          // required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;
19813          private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto nsInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.getDefaultInstance();
19814          private com.google.protobuf.SingleFieldBuilder<
19815              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProtoOrBuilder> nsInfoBuilder_;
19816          /**
19817           * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
19818           */
19819          public boolean hasNsInfo() {
19820            return ((bitField0_ & 0x00000002) == 0x00000002);
19821          }
19822          /**
19823           * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
19824           */
19825          public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto getNsInfo() {
19826            if (nsInfoBuilder_ == null) {
19827              return nsInfo_;
19828            } else {
19829              return nsInfoBuilder_.getMessage();
19830            }
19831          }
19832          /**
19833           * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
19834           */
19835          public Builder setNsInfo(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto value) {
19836            if (nsInfoBuilder_ == null) {
19837              if (value == null) {
19838                throw new NullPointerException();
19839              }
19840              nsInfo_ = value;
19841              onChanged();
19842            } else {
19843              nsInfoBuilder_.setMessage(value);
19844            }
19845            bitField0_ |= 0x00000002;
19846            return this;
19847          }
19848          /**
19849           * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
19850           */
19851          public Builder setNsInfo(
19852              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.Builder builderForValue) {
19853            if (nsInfoBuilder_ == null) {
19854              nsInfo_ = builderForValue.build();
19855              onChanged();
19856            } else {
19857              nsInfoBuilder_.setMessage(builderForValue.build());
19858            }
19859            bitField0_ |= 0x00000002;
19860            return this;
19861          }
19862          /**
19863           * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
19864           */
19865          public Builder mergeNsInfo(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto value) {
19866            if (nsInfoBuilder_ == null) {
19867              if (((bitField0_ & 0x00000002) == 0x00000002) &&
19868                  nsInfo_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.getDefaultInstance()) {
19869                nsInfo_ =
19870                  org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.newBuilder(nsInfo_).mergeFrom(value).buildPartial();
19871              } else {
19872                nsInfo_ = value;
19873              }
19874              onChanged();
19875            } else {
19876              nsInfoBuilder_.mergeFrom(value);
19877            }
19878            bitField0_ |= 0x00000002;
19879            return this;
19880          }
19881          /**
19882           * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
19883           */
19884          public Builder clearNsInfo() {
19885            if (nsInfoBuilder_ == null) {
19886              nsInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.getDefaultInstance();
19887              onChanged();
19888            } else {
19889              nsInfoBuilder_.clear();
19890            }
19891            bitField0_ = (bitField0_ & ~0x00000002);
19892            return this;
19893          }
19894          /**
19895           * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
19896           */
19897          public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.Builder getNsInfoBuilder() {
19898            bitField0_ |= 0x00000002;
19899            onChanged();
19900            return getNsInfoFieldBuilder().getBuilder();
19901          }
19902          /**
19903           * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
19904           */
19905          public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProtoOrBuilder getNsInfoOrBuilder() {
19906            if (nsInfoBuilder_ != null) {
19907              return nsInfoBuilder_.getMessageOrBuilder();
19908            } else {
19909              return nsInfo_;
19910            }
19911          }
19912          /**
19913           * <code>required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;</code>
19914           */
19915          private com.google.protobuf.SingleFieldBuilder<
19916              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProtoOrBuilder> 
19917              getNsInfoFieldBuilder() {
19918            if (nsInfoBuilder_ == null) {
19919              nsInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder<
19920                  org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProtoOrBuilder>(
19921                      nsInfo_,
19922                      getParentForChildren(),
19923                      isClean());
19924              nsInfo_ = null;
19925            }
19926            return nsInfoBuilder_;
19927          }
19928    
19929          // required uint64 epoch = 3;
19930          private long epoch_ ;
19931          /**
19932           * <code>required uint64 epoch = 3;</code>
19933           */
19934          public boolean hasEpoch() {
19935            return ((bitField0_ & 0x00000004) == 0x00000004);
19936          }
19937          /**
19938           * <code>required uint64 epoch = 3;</code>
19939           */
19940          public long getEpoch() {
19941            return epoch_;
19942          }
19943          /**
19944           * <code>required uint64 epoch = 3;</code>
19945           */
19946          public Builder setEpoch(long value) {
19947            bitField0_ |= 0x00000004;
19948            epoch_ = value;
19949            onChanged();
19950            return this;
19951          }
19952          /**
19953           * <code>required uint64 epoch = 3;</code>
19954           */
19955          public Builder clearEpoch() {
19956            bitField0_ = (bitField0_ & ~0x00000004);
19957            epoch_ = 0L;
19958            onChanged();
19959            return this;
19960          }
19961    
19962          // @@protoc_insertion_point(builder_scope:hadoop.hdfs.qjournal.NewEpochRequestProto)
19963        }
19964    
19965        static {
19966          defaultInstance = new NewEpochRequestProto(true);
19967          defaultInstance.initFields();
19968        }
19969    
19970        // @@protoc_insertion_point(class_scope:hadoop.hdfs.qjournal.NewEpochRequestProto)
19971      }
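
           // Usage sketch (illustrative, not generator output): a newEpoch()
           // request is assembled through the Builder, and build() throws an
           // UninitializedMessageException if any required field is unset. The
           // JournalIdProto `jid` and NamespaceInfoProto `nsInfo` values are
           // assumed to be in scope.
           //
           //   NewEpochRequestProto req = NewEpochRequestProto.newBuilder()
           //       .setJid(jid)
           //       .setNsInfo(nsInfo)
           //       .setEpoch(5L)
           //       .build();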
19972    
19973      public interface NewEpochResponseProtoOrBuilder
19974          extends com.google.protobuf.MessageOrBuilder {
19975    
19976        // optional uint64 lastSegmentTxId = 1;
19977        /**
19978         * <code>optional uint64 lastSegmentTxId = 1;</code>
19979         */
19980        boolean hasLastSegmentTxId();
19981        /**
19982         * <code>optional uint64 lastSegmentTxId = 1;</code>
19983         */
19984        long getLastSegmentTxId();
19985      }
19986      /**
19987       * Protobuf type {@code hadoop.hdfs.qjournal.NewEpochResponseProto}
19988       */
19989      public static final class NewEpochResponseProto extends
19990          com.google.protobuf.GeneratedMessage
19991          implements NewEpochResponseProtoOrBuilder {
19992        // Use NewEpochResponseProto.newBuilder() to construct.
19993        private NewEpochResponseProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
19994          super(builder);
19995          this.unknownFields = builder.getUnknownFields();
19996        }
19997        private NewEpochResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
19998    
19999        private static final NewEpochResponseProto defaultInstance;
20000        public static NewEpochResponseProto getDefaultInstance() {
20001          return defaultInstance;
20002        }
20003    
20004        public NewEpochResponseProto getDefaultInstanceForType() {
20005          return defaultInstance;
20006        }
20007    
20008        private final com.google.protobuf.UnknownFieldSet unknownFields;
20009        @java.lang.Override
20010        public final com.google.protobuf.UnknownFieldSet
20011            getUnknownFields() {
20012          return this.unknownFields;
20013        }
20014        private NewEpochResponseProto(
20015            com.google.protobuf.CodedInputStream input,
20016            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
20017            throws com.google.protobuf.InvalidProtocolBufferException {
20018          initFields();
20019          int mutable_bitField0_ = 0;
20020          com.google.protobuf.UnknownFieldSet.Builder unknownFields =
20021              com.google.protobuf.UnknownFieldSet.newBuilder();
20022          try {
20023            boolean done = false;
20024            while (!done) {
20025              int tag = input.readTag();
20026              switch (tag) {
20027                case 0:
20028                  done = true;
20029                  break;
20030                default: {
20031                  if (!parseUnknownField(input, unknownFields,
20032                                         extensionRegistry, tag)) {
20033                    done = true;
20034                  }
20035                  break;
20036                }
20037                case 8: {
20038                  bitField0_ |= 0x00000001;
20039                  lastSegmentTxId_ = input.readUInt64();
20040                  break;
20041                }
20042              }
20043            }
20044          } catch (com.google.protobuf.InvalidProtocolBufferException e) {
20045            throw e.setUnfinishedMessage(this);
20046          } catch (java.io.IOException e) {
20047            throw new com.google.protobuf.InvalidProtocolBufferException(
20048                e.getMessage()).setUnfinishedMessage(this);
20049          } finally {
20050            this.unknownFields = unknownFields.build();
20051            makeExtensionsImmutable();
20052          }
20053        }
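
             // Wire-format note (editorial): each case label in the parse loop above
             // is a precomputed protobuf tag, (field_number << 3) | wire_type. For
             // lastSegmentTxId (field 1, varint wire type 0) that is (1 << 3) | 0 == 8;
             // tag 0 marks end of input, and anything unrecognized is retained via
             // parseUnknownField().
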
20054        public static final com.google.protobuf.Descriptors.Descriptor
20055            getDescriptor() {
20056          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_NewEpochResponseProto_descriptor;
20057        }
20058    
20059        protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
20060            internalGetFieldAccessorTable() {
20061          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_NewEpochResponseProto_fieldAccessorTable
20062              .ensureFieldAccessorsInitialized(
20063                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto.Builder.class);
20064        }
20065    
20066        public static com.google.protobuf.Parser<NewEpochResponseProto> PARSER =
20067            new com.google.protobuf.AbstractParser<NewEpochResponseProto>() {
20068          public NewEpochResponseProto parsePartialFrom(
20069              com.google.protobuf.CodedInputStream input,
20070              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
20071              throws com.google.protobuf.InvalidProtocolBufferException {
20072            return new NewEpochResponseProto(input, extensionRegistry);
20073          }
20074        };
20075    
20076        @java.lang.Override
20077        public com.google.protobuf.Parser<NewEpochResponseProto> getParserForType() {
20078          return PARSER;
20079        }
20080    
20081        private int bitField0_;
20082        // optional uint64 lastSegmentTxId = 1;
20083        public static final int LASTSEGMENTTXID_FIELD_NUMBER = 1;
20084        private long lastSegmentTxId_;
20085        /**
20086         * <code>optional uint64 lastSegmentTxId = 1;</code>
20087         */
20088        public boolean hasLastSegmentTxId() {
20089          return ((bitField0_ & 0x00000001) == 0x00000001);
20090        }
20091        /**
20092         * <code>optional uint64 lastSegmentTxId = 1;</code>
20093         */
20094        public long getLastSegmentTxId() {
20095          return lastSegmentTxId_;
20096        }
20097    
20098        private void initFields() {
20099          lastSegmentTxId_ = 0L;
20100        }
20101        private byte memoizedIsInitialized = -1;
20102        public final boolean isInitialized() {
20103          byte isInitialized = memoizedIsInitialized;
20104          if (isInitialized != -1) return isInitialized == 1;
20105    
20106          memoizedIsInitialized = 1;
20107          return true;
20108        }
20109    
20110        public void writeTo(com.google.protobuf.CodedOutputStream output)
20111                            throws java.io.IOException {
20112          getSerializedSize();
20113          if (((bitField0_ & 0x00000001) == 0x00000001)) {
20114            output.writeUInt64(1, lastSegmentTxId_);
20115          }
20116          getUnknownFields().writeTo(output);
20117        }
20118    
20119        private int memoizedSerializedSize = -1;
20120        public int getSerializedSize() {
20121          int size = memoizedSerializedSize;
20122          if (size != -1) return size;
20123    
20124          size = 0;
20125          if (((bitField0_ & 0x00000001) == 0x00000001)) {
20126            size += com.google.protobuf.CodedOutputStream
20127              .computeUInt64Size(1, lastSegmentTxId_);
20128          }
20129          size += getUnknownFields().getSerializedSize();
20130          memoizedSerializedSize = size;
20131          return size;
20132        }
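
             // Round-trip sketch (illustrative): getSerializedSize() memoizes its
             // result, so the call at the top of writeTo() is cheap on reuse.
             // Assuming a built message `resp`:
             //
             //   byte[] bytes = resp.toByteArray();   // drives writeTo() internally
             //   NewEpochResponseProto copy = NewEpochResponseProto.parseFrom(bytes);
             //   assert copy.equals(resp);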
20133    
20134        private static final long serialVersionUID = 0L;
20135        @java.lang.Override
20136        protected java.lang.Object writeReplace()
20137            throws java.io.ObjectStreamException {
20138          return super.writeReplace();
20139        }
20140    
20141        @java.lang.Override
20142        public boolean equals(final java.lang.Object obj) {
20143          if (obj == this) {
20144            return true;
20145          }
20146          if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto)) {
20147            return super.equals(obj);
20148          }
20149          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto) obj;
20150    
20151          boolean result = true;
20152          result = result && (hasLastSegmentTxId() == other.hasLastSegmentTxId());
20153          if (hasLastSegmentTxId()) {
20154            result = result && (getLastSegmentTxId()
20155                == other.getLastSegmentTxId());
20156          }
20157          result = result &&
20158              getUnknownFields().equals(other.getUnknownFields());
20159          return result;
20160        }
20161    
20162        private int memoizedHashCode = 0;
20163        @java.lang.Override
20164        public int hashCode() {
20165          if (memoizedHashCode != 0) {
20166            return memoizedHashCode;
20167          }
20168          int hash = 41;
20169          hash = (19 * hash) + getDescriptorForType().hashCode();
20170          if (hasLastSegmentTxId()) {
20171            hash = (37 * hash) + LASTSEGMENTTXID_FIELD_NUMBER;
20172            hash = (53 * hash) + hashLong(getLastSegmentTxId());
20173          }
20174          hash = (29 * hash) + getUnknownFields().hashCode();
20175          memoizedHashCode = hash;
20176          return hash;
20177        }
20178    
20179        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto parseFrom(
20180            com.google.protobuf.ByteString data)
20181            throws com.google.protobuf.InvalidProtocolBufferException {
20182          return PARSER.parseFrom(data);
20183        }
20184        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto parseFrom(
20185            com.google.protobuf.ByteString data,
20186            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
20187            throws com.google.protobuf.InvalidProtocolBufferException {
20188          return PARSER.parseFrom(data, extensionRegistry);
20189        }
20190        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto parseFrom(byte[] data)
20191            throws com.google.protobuf.InvalidProtocolBufferException {
20192          return PARSER.parseFrom(data);
20193        }
20194        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto parseFrom(
20195            byte[] data,
20196            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
20197            throws com.google.protobuf.InvalidProtocolBufferException {
20198          return PARSER.parseFrom(data, extensionRegistry);
20199        }
20200        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto parseFrom(java.io.InputStream input)
20201            throws java.io.IOException {
20202          return PARSER.parseFrom(input);
20203        }
20204        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto parseFrom(
20205            java.io.InputStream input,
20206            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
20207            throws java.io.IOException {
20208          return PARSER.parseFrom(input, extensionRegistry);
20209        }
20210        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto parseDelimitedFrom(java.io.InputStream input)
20211            throws java.io.IOException {
20212          return PARSER.parseDelimitedFrom(input);
20213        }
20214        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto parseDelimitedFrom(
20215            java.io.InputStream input,
20216            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
20217            throws java.io.IOException {
20218          return PARSER.parseDelimitedFrom(input, extensionRegistry);
20219        }
20220        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto parseFrom(
20221            com.google.protobuf.CodedInputStream input)
20222            throws java.io.IOException {
20223          return PARSER.parseFrom(input);
20224        }
20225        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto parseFrom(
20226            com.google.protobuf.CodedInputStream input,
20227            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
20228            throws java.io.IOException {
20229          return PARSER.parseFrom(input, extensionRegistry);
20230        }
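
             // Editorial note: every parseFrom()/parseDelimitedFrom() overload above
             // delegates to PARSER. The delimited form prefixes each message with a
             // varint length, so several messages can share one stream (sketch,
             // assuming java.io streams `out` and `in`):
             //
             //   resp.writeDelimitedTo(out);
             //   NewEpochResponseProto next =
             //       NewEpochResponseProto.parseDelimitedFrom(in);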
20231    
20232        public static Builder newBuilder() { return Builder.create(); }
20233        public Builder newBuilderForType() { return newBuilder(); }
20234        public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto prototype) {
20235          return newBuilder().mergeFrom(prototype);
20236        }
20237        public Builder toBuilder() { return newBuilder(this); }
20238    
20239        @java.lang.Override
20240        protected Builder newBuilderForType(
20241            com.google.protobuf.GeneratedMessage.BuilderParent parent) {
20242          Builder builder = new Builder(parent);
20243          return builder;
20244        }
20245        /**
20246         * Protobuf type {@code hadoop.hdfs.qjournal.NewEpochResponseProto}
20247         */
20248        public static final class Builder extends
20249            com.google.protobuf.GeneratedMessage.Builder<Builder>
20250           implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProtoOrBuilder {
20251          public static final com.google.protobuf.Descriptors.Descriptor
20252              getDescriptor() {
20253            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_NewEpochResponseProto_descriptor;
20254          }
20255    
20256          protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
20257              internalGetFieldAccessorTable() {
20258            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_NewEpochResponseProto_fieldAccessorTable
20259                .ensureFieldAccessorsInitialized(
20260                    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto.Builder.class);
20261          }
20262    
20263          // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto.newBuilder()
20264          private Builder() {
20265            maybeForceBuilderInitialization();
20266          }
20267    
20268          private Builder(
20269              com.google.protobuf.GeneratedMessage.BuilderParent parent) {
20270            super(parent);
20271            maybeForceBuilderInitialization();
20272          }
20273          private void maybeForceBuilderInitialization() {
20274            if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
20275            }
20276          }
20277          private static Builder create() {
20278            return new Builder();
20279          }
20280    
20281          public Builder clear() {
20282            super.clear();
20283            lastSegmentTxId_ = 0L;
20284            bitField0_ = (bitField0_ & ~0x00000001);
20285            return this;
20286          }
20287    
20288          public Builder clone() {
20289            return create().mergeFrom(buildPartial());
20290          }
20291    
20292          public com.google.protobuf.Descriptors.Descriptor
20293              getDescriptorForType() {
20294            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_NewEpochResponseProto_descriptor;
20295          }
20296    
20297          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto getDefaultInstanceForType() {
20298            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto.getDefaultInstance();
20299          }
20300    
20301          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto build() {
20302            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto result = buildPartial();
20303            if (!result.isInitialized()) {
20304              throw newUninitializedMessageException(result);
20305            }
20306            return result;
20307          }
20308    
20309          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto buildPartial() {
20310            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto(this);
20311            int from_bitField0_ = bitField0_;
20312            int to_bitField0_ = 0;
20313            if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
20314              to_bitField0_ |= 0x00000001;
20315            }
20316            result.lastSegmentTxId_ = lastSegmentTxId_;
20317            result.bitField0_ = to_bitField0_;
20318            onBuilt();
20319            return result;
20320          }
20321    
20322          public Builder mergeFrom(com.google.protobuf.Message other) {
20323            if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto) {
20324              return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto)other);
20325            } else {
20326              super.mergeFrom(other);
20327              return this;
20328            }
20329          }
20330    
20331          public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto other) {
20332            if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto.getDefaultInstance()) return this;
20333            if (other.hasLastSegmentTxId()) {
20334              setLastSegmentTxId(other.getLastSegmentTxId());
20335            }
20336            this.mergeUnknownFields(other.getUnknownFields());
20337            return this;
20338          }
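
               // Merge semantics sketch (illustrative): fields explicitly set in
               // `other` overwrite this Builder's values, unset fields are left
               // alone, and unknown fields are combined.
               //
               //   Builder b = NewEpochResponseProto.newBuilder()
               //       .setLastSegmentTxId(1L);
               //   b.mergeFrom(NewEpochResponseProto.newBuilder()
               //       .setLastSegmentTxId(7L).build());
               //   // b.getLastSegmentTxId() == 7L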
20339    
20340          public final boolean isInitialized() {
20341            return true;
20342          }
20343    
20344          public Builder mergeFrom(
20345              com.google.protobuf.CodedInputStream input,
20346              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
20347              throws java.io.IOException {
20348            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto parsedMessage = null;
20349            try {
20350              parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
20351            } catch (com.google.protobuf.InvalidProtocolBufferException e) {
20352              parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto) e.getUnfinishedMessage();
20353              throw e;
20354            } finally {
20355              if (parsedMessage != null) {
20356                mergeFrom(parsedMessage);
20357              }
20358            }
20359            return this;
20360          }
20361          private int bitField0_;
20362    
20363          // optional uint64 lastSegmentTxId = 1;
20364          private long lastSegmentTxId_ ;
20365          /**
20366           * <code>optional uint64 lastSegmentTxId = 1;</code>
20367           */
20368          public boolean hasLastSegmentTxId() {
20369            return ((bitField0_ & 0x00000001) == 0x00000001);
20370          }
20371          /**
20372           * <code>optional uint64 lastSegmentTxId = 1;</code>
20373           */
20374          public long getLastSegmentTxId() {
20375            return lastSegmentTxId_;
20376          }
20377          /**
20378           * <code>optional uint64 lastSegmentTxId = 1;</code>
20379           */
20380          public Builder setLastSegmentTxId(long value) {
20381            bitField0_ |= 0x00000001;
20382            lastSegmentTxId_ = value;
20383            onChanged();
20384            return this;
20385          }
20386          /**
20387           * <code>optional uint64 lastSegmentTxId = 1;</code>
20388           */
20389          public Builder clearLastSegmentTxId() {
20390            bitField0_ = (bitField0_ & ~0x00000001);
20391            lastSegmentTxId_ = 0L;
20392            onChanged();
20393            return this;
20394          }
20395    
20396          // @@protoc_insertion_point(builder_scope:hadoop.hdfs.qjournal.NewEpochResponseProto)
20397        }
20398    
20399        static {
20400          defaultInstance = new NewEpochResponseProto(true);
20401          defaultInstance.initFields();
20402        }
20403    
20404        // @@protoc_insertion_point(class_scope:hadoop.hdfs.qjournal.NewEpochResponseProto)
20405      }
20406    
20407      public interface GetEditLogManifestRequestProtoOrBuilder
20408          extends com.google.protobuf.MessageOrBuilder {
20409    
20410        // required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;
20411        /**
20412         * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
20413         */
20414        boolean hasJid();
20415        /**
20416         * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
20417         */
20418        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid();
20419        /**
20420         * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
20421         */
20422        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder();
20423    
20424        // required uint64 sinceTxId = 2;
20425        /**
20426         * <code>required uint64 sinceTxId = 2;</code>
20427         *
20428         * <pre>
20429         * Transaction ID
20430         * </pre>
20431         */
20432        boolean hasSinceTxId();
20433        /**
20434         * <code>required uint64 sinceTxId = 2;</code>
20435         *
20436         * <pre>
20437         * Transaction ID
20438         * </pre>
20439         */
20440        long getSinceTxId();
20441    
20442        // optional bool inProgressOk = 4 [default = false];
20443        /**
20444         * <code>optional bool inProgressOk = 4 [default = false];</code>
20445         *
20446         * <pre>
20447         * Whether or not the client will be reading from the returned streams.
20448         * optional bool forReading = 3 [default = true]; &lt;obsolete, do not reuse&gt;
20449         * </pre>
20450         */
20451        boolean hasInProgressOk();
20452        /**
20453         * <code>optional bool inProgressOk = 4 [default = false];</code>
20454         *
20455         * <pre>
20456         * Whether or not the client will be reading from the returned streams.
20457         * optional bool forReading = 3 [default = true]; &lt;obsolete, do not reuse&gt;
20458         * </pre>
20459         */
20460        boolean getInProgressOk();
20461      }
20462      /**
20463       * Protobuf type {@code hadoop.hdfs.qjournal.GetEditLogManifestRequestProto}
20464       *
20465       * <pre>
20466       **
20467       * getEditLogManifest()
20468       * </pre>
20469       */
20470      public static final class GetEditLogManifestRequestProto extends
20471          com.google.protobuf.GeneratedMessage
20472          implements GetEditLogManifestRequestProtoOrBuilder {
20473        // Use GetEditLogManifestRequestProto.newBuilder() to construct.
20474        private GetEditLogManifestRequestProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
20475          super(builder);
20476          this.unknownFields = builder.getUnknownFields();
20477        }
20478        private GetEditLogManifestRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
20479    
20480        private static final GetEditLogManifestRequestProto defaultInstance;
20481        public static GetEditLogManifestRequestProto getDefaultInstance() {
20482          return defaultInstance;
20483        }
20484    
20485        public GetEditLogManifestRequestProto getDefaultInstanceForType() {
20486          return defaultInstance;
20487        }
20488    
20489        private final com.google.protobuf.UnknownFieldSet unknownFields;
20490        @java.lang.Override
20491        public final com.google.protobuf.UnknownFieldSet
20492            getUnknownFields() {
20493          return this.unknownFields;
20494        }
20495        private GetEditLogManifestRequestProto(
20496            com.google.protobuf.CodedInputStream input,
20497            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
20498            throws com.google.protobuf.InvalidProtocolBufferException {
20499          initFields();
20500          int mutable_bitField0_ = 0;
20501          com.google.protobuf.UnknownFieldSet.Builder unknownFields =
20502              com.google.protobuf.UnknownFieldSet.newBuilder();
20503          try {
20504            boolean done = false;
20505            while (!done) {
20506              int tag = input.readTag();
20507              switch (tag) {
20508                case 0:
20509                  done = true;
20510                  break;
20511                default: {
20512                  if (!parseUnknownField(input, unknownFields,
20513                                         extensionRegistry, tag)) {
20514                    done = true;
20515                  }
20516                  break;
20517                }
20518                case 10: {
20519                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder subBuilder = null;
20520                  if (((bitField0_ & 0x00000001) == 0x00000001)) {
20521                    subBuilder = jid_.toBuilder();
20522                  }
20523                  jid_ = input.readMessage(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.PARSER, extensionRegistry);
20524                  if (subBuilder != null) {
20525                    subBuilder.mergeFrom(jid_);
20526                    jid_ = subBuilder.buildPartial();
20527                  }
20528                  bitField0_ |= 0x00000001;
20529                  break;
20530                }
20531                case 16: {
20532                  bitField0_ |= 0x00000002;
20533                  sinceTxId_ = input.readUInt64();
20534                  break;
20535                }
20536                case 32: {
20537                  bitField0_ |= 0x00000004;
20538                  inProgressOk_ = input.readBool();
20539                  break;
20540                }
20541              }
20542            }
20543          } catch (com.google.protobuf.InvalidProtocolBufferException e) {
20544            throw e.setUnfinishedMessage(this);
20545          } catch (java.io.IOException e) {
20546            throw new com.google.protobuf.InvalidProtocolBufferException(
20547                e.getMessage()).setUnfinishedMessage(this);
20548          } finally {
20549            this.unknownFields = unknownFields.build();
20550            makeExtensionsImmutable();
20551          }
20552        }
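
             // Parsing note (editorial): if field 1 (tag 10, length-delimited)
             // appears more than once on the wire, the case above merges the newly
             // read JournalIdProto into the previous value through a sub-builder
             // instead of replacing it, which is protobuf's rule for duplicated
             // embedded messages.
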
20553        public static final com.google.protobuf.Descriptors.Descriptor
20554            getDescriptor() {
20555          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_GetEditLogManifestRequestProto_descriptor;
20556        }
20557    
20558        protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
20559            internalGetFieldAccessorTable() {
20560          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_GetEditLogManifestRequestProto_fieldAccessorTable
20561              .ensureFieldAccessorsInitialized(
20562                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto.Builder.class);
20563        }
20564    
20565        public static com.google.protobuf.Parser<GetEditLogManifestRequestProto> PARSER =
20566            new com.google.protobuf.AbstractParser<GetEditLogManifestRequestProto>() {
20567          public GetEditLogManifestRequestProto parsePartialFrom(
20568              com.google.protobuf.CodedInputStream input,
20569              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
20570              throws com.google.protobuf.InvalidProtocolBufferException {
20571            return new GetEditLogManifestRequestProto(input, extensionRegistry);
20572          }
20573        };
20574    
20575        @java.lang.Override
20576        public com.google.protobuf.Parser<GetEditLogManifestRequestProto> getParserForType() {
20577          return PARSER;
20578        }
20579    
20580        private int bitField0_;
20581        // required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;
20582        public static final int JID_FIELD_NUMBER = 1;
20583        private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto jid_;
20584        /**
20585         * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
20586         */
20587        public boolean hasJid() {
20588          return ((bitField0_ & 0x00000001) == 0x00000001);
20589        }
20590        /**
20591         * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
20592         */
20593        public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid() {
20594          return jid_;
20595        }
20596        /**
20597         * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
20598         */
20599        public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder() {
20600          return jid_;
20601        }
20602    
20603        // required uint64 sinceTxId = 2;
20604        public static final int SINCETXID_FIELD_NUMBER = 2;
20605        private long sinceTxId_;
20606        /**
20607         * <code>required uint64 sinceTxId = 2;</code>
20608         *
20609         * <pre>
20610         * Transaction ID
20611         * </pre>
20612         */
20613        public boolean hasSinceTxId() {
20614          return ((bitField0_ & 0x00000002) == 0x00000002);
20615        }
20616        /**
20617         * <code>required uint64 sinceTxId = 2;</code>
20618         *
20619         * <pre>
20620         * Transaction ID
20621         * </pre>
20622         */
20623        public long getSinceTxId() {
20624          return sinceTxId_;
20625        }
20626    
20627        // optional bool inProgressOk = 4 [default = false];
20628        public static final int INPROGRESSOK_FIELD_NUMBER = 4;
20629        private boolean inProgressOk_;
20630        /**
20631         * <code>optional bool inProgressOk = 4 [default = false];</code>
20632         *
20633         * <pre>
20634         * Whether or not the client will be reading from the returned streams.
20635         * optional bool forReading = 3 [default = true]; &lt;obsolete, do not reuse&gt;
20636         * </pre>
20637         */
20638        public boolean hasInProgressOk() {
20639          return ((bitField0_ & 0x00000004) == 0x00000004);
20640        }
20641        /**
20642         * <code>optional bool inProgressOk = 4 [default = false];</code>
20643         *
20644         * <pre>
20645         * Whether or not the client will be reading from the returned streams.
20646         * optional bool forReading = 3 [default = true]; &lt;obsolete, do not reuse&gt;
20647         * </pre>
20648         */
20649        public boolean getInProgressOk() {
20650          return inProgressOk_;
20651        }
20652    
20653        private void initFields() {
20654          jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
20655          sinceTxId_ = 0L;
20656          inProgressOk_ = false;
20657        }
20658        private byte memoizedIsInitialized = -1;
20659        public final boolean isInitialized() {
20660          byte isInitialized = memoizedIsInitialized;
20661          if (isInitialized != -1) return isInitialized == 1;
20662    
20663          if (!hasJid()) {
20664            memoizedIsInitialized = 0;
20665            return false;
20666          }
20667          if (!hasSinceTxId()) {
20668            memoizedIsInitialized = 0;
20669            return false;
20670          }
20671          if (!getJid().isInitialized()) {
20672            memoizedIsInitialized = 0;
20673            return false;
20674          }
20675          memoizedIsInitialized = 1;
20676          return true;
20677        }
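
             // Required-field sketch (illustrative): jid and sinceTxId are required,
             // so build() on a Builder missing either one throws; buildPartial()
             // returns the incomplete message instead.
             //
             //   GetEditLogManifestRequestProto.Builder b =
             //       GetEditLogManifestRequestProto.newBuilder().setSinceTxId(100L);
             //   b.isInitialized();   // false: jid is still unset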
20678    
20679        public void writeTo(com.google.protobuf.CodedOutputStream output)
20680                            throws java.io.IOException {
20681          getSerializedSize();
20682          if (((bitField0_ & 0x00000001) == 0x00000001)) {
20683            output.writeMessage(1, jid_);
20684          }
20685          if (((bitField0_ & 0x00000002) == 0x00000002)) {
20686            output.writeUInt64(2, sinceTxId_);
20687          }
20688          if (((bitField0_ & 0x00000004) == 0x00000004)) {
20689            output.writeBool(4, inProgressOk_);
20690          }
20691          getUnknownFields().writeTo(output);
20692        }
20693    
20694        private int memoizedSerializedSize = -1;
20695        public int getSerializedSize() {
20696          int size = memoizedSerializedSize;
20697          if (size != -1) return size;
20698    
20699          size = 0;
20700          if (((bitField0_ & 0x00000001) == 0x00000001)) {
20701            size += com.google.protobuf.CodedOutputStream
20702              .computeMessageSize(1, jid_);
20703          }
20704          if (((bitField0_ & 0x00000002) == 0x00000002)) {
20705            size += com.google.protobuf.CodedOutputStream
20706              .computeUInt64Size(2, sinceTxId_);
20707          }
20708          if (((bitField0_ & 0x00000004) == 0x00000004)) {
20709            size += com.google.protobuf.CodedOutputStream
20710              .computeBoolSize(4, inProgressOk_);
20711          }
20712          size += getUnknownFields().getSerializedSize();
20713          memoizedSerializedSize = size;
20714          return size;
20715        }
20716    
20717        private static final long serialVersionUID = 0L;
20718        @java.lang.Override
20719        protected java.lang.Object writeReplace()
20720            throws java.io.ObjectStreamException {
20721          return super.writeReplace();
20722        }
20723    
20724        @java.lang.Override
20725        public boolean equals(final java.lang.Object obj) {
20726          if (obj == this) {
20727            return true;
20728          }
20729          if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto)) {
20730            return super.equals(obj);
20731          }
20732          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto) obj;
20733    
20734          boolean result = true;
20735          result = result && (hasJid() == other.hasJid());
20736          if (hasJid()) {
20737            result = result && getJid()
20738                .equals(other.getJid());
20739          }
20740          result = result && (hasSinceTxId() == other.hasSinceTxId());
20741          if (hasSinceTxId()) {
20742            result = result && (getSinceTxId()
20743                == other.getSinceTxId());
20744          }
20745          result = result && (hasInProgressOk() == other.hasInProgressOk());
20746          if (hasInProgressOk()) {
20747            result = result && (getInProgressOk()
20748                == other.getInProgressOk());
20749          }
20750          result = result &&
20751              getUnknownFields().equals(other.getUnknownFields());
20752          return result;
20753        }
20754    
20755        private int memoizedHashCode = 0;
20756        @java.lang.Override
20757        public int hashCode() {
20758          if (memoizedHashCode != 0) {
20759            return memoizedHashCode;
20760          }
20761          int hash = 41;
20762          hash = (19 * hash) + getDescriptorForType().hashCode();
20763          if (hasJid()) {
20764            hash = (37 * hash) + JID_FIELD_NUMBER;
20765            hash = (53 * hash) + getJid().hashCode();
20766          }
20767          if (hasSinceTxId()) {
20768            hash = (37 * hash) + SINCETXID_FIELD_NUMBER;
20769            hash = (53 * hash) + hashLong(getSinceTxId());
20770          }
20771          if (hasInProgressOk()) {
20772            hash = (37 * hash) + INPROGRESSOK_FIELD_NUMBER;
20773            hash = (53 * hash) + hashBoolean(getInProgressOk());
20774          }
20775          hash = (29 * hash) + getUnknownFields().hashCode();
20776          memoizedHashCode = hash;
20777          return hash;
20778        }
20779    
20780        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto parseFrom(
20781            com.google.protobuf.ByteString data)
20782            throws com.google.protobuf.InvalidProtocolBufferException {
20783          return PARSER.parseFrom(data);
20784        }
20785        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto parseFrom(
20786            com.google.protobuf.ByteString data,
20787            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
20788            throws com.google.protobuf.InvalidProtocolBufferException {
20789          return PARSER.parseFrom(data, extensionRegistry);
20790        }
20791        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto parseFrom(byte[] data)
20792            throws com.google.protobuf.InvalidProtocolBufferException {
20793          return PARSER.parseFrom(data);
20794        }
20795        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto parseFrom(
20796            byte[] data,
20797            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
20798            throws com.google.protobuf.InvalidProtocolBufferException {
20799          return PARSER.parseFrom(data, extensionRegistry);
20800        }
20801        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto parseFrom(java.io.InputStream input)
20802            throws java.io.IOException {
20803          return PARSER.parseFrom(input);
20804        }
20805        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto parseFrom(
20806            java.io.InputStream input,
20807            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
20808            throws java.io.IOException {
20809          return PARSER.parseFrom(input, extensionRegistry);
20810        }
20811        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto parseDelimitedFrom(java.io.InputStream input)
20812            throws java.io.IOException {
20813          return PARSER.parseDelimitedFrom(input);
20814        }
20815        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto parseDelimitedFrom(
20816            java.io.InputStream input,
20817            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
20818            throws java.io.IOException {
20819          return PARSER.parseDelimitedFrom(input, extensionRegistry);
20820        }
20821        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto parseFrom(
20822            com.google.protobuf.CodedInputStream input)
20823            throws java.io.IOException {
20824          return PARSER.parseFrom(input);
20825        }
20826        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto parseFrom(
20827            com.google.protobuf.CodedInputStream input,
20828            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
20829            throws java.io.IOException {
20830          return PARSER.parseFrom(input, extensionRegistry);
20831        }
20832    
20833        public static Builder newBuilder() { return Builder.create(); }
20834        public Builder newBuilderForType() { return newBuilder(); }
20835        public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto prototype) {
20836          return newBuilder().mergeFrom(prototype);
20837        }
20838        public Builder toBuilder() { return newBuilder(this); }
20839    
20840        @java.lang.Override
20841        protected Builder newBuilderForType(
20842            com.google.protobuf.GeneratedMessage.BuilderParent parent) {
20843          Builder builder = new Builder(parent);
20844          return builder;
20845        }
20846        /**
20847         * Protobuf type {@code hadoop.hdfs.qjournal.GetEditLogManifestRequestProto}
20848         *
20849         * <pre>
20850         **
20851         * getEditLogManifest()
20852         * </pre>
20853         */
20854        public static final class Builder extends
20855            com.google.protobuf.GeneratedMessage.Builder<Builder>
20856           implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProtoOrBuilder {
20857          public static final com.google.protobuf.Descriptors.Descriptor
20858              getDescriptor() {
20859            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_GetEditLogManifestRequestProto_descriptor;
20860          }
20861    
20862          protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
20863              internalGetFieldAccessorTable() {
20864            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_GetEditLogManifestRequestProto_fieldAccessorTable
20865                .ensureFieldAccessorsInitialized(
20866                    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto.Builder.class);
20867          }
20868    
20869          // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto.newBuilder()
20870          private Builder() {
20871            maybeForceBuilderInitialization();
20872          }
20873    
20874          private Builder(
20875              com.google.protobuf.GeneratedMessage.BuilderParent parent) {
20876            super(parent);
20877            maybeForceBuilderInitialization();
20878          }
20879          private void maybeForceBuilderInitialization() {
20880            if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
20881              getJidFieldBuilder();
20882            }
20883          }
20884          private static Builder create() {
20885            return new Builder();
20886          }
20887    
20888          public Builder clear() {
20889            super.clear();
20890            if (jidBuilder_ == null) {
20891              jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
20892            } else {
20893              jidBuilder_.clear();
20894            }
20895            bitField0_ = (bitField0_ & ~0x00000001);
20896            sinceTxId_ = 0L;
20897            bitField0_ = (bitField0_ & ~0x00000002);
20898            inProgressOk_ = false;
20899            bitField0_ = (bitField0_ & ~0x00000004);
20900            return this;
20901          }
20902    
20903          public Builder clone() {
20904            return create().mergeFrom(buildPartial());
20905          }
20906    
20907          public com.google.protobuf.Descriptors.Descriptor
20908              getDescriptorForType() {
20909            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_GetEditLogManifestRequestProto_descriptor;
20910          }
20911    
20912          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto getDefaultInstanceForType() {
20913            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto.getDefaultInstance();
20914          }
20915    
20916          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto build() {
20917            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto result = buildPartial();
20918            if (!result.isInitialized()) {
20919              throw newUninitializedMessageException(result);
20920            }
20921            return result;
20922          }
20923    
20924          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto buildPartial() {
20925            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto(this);
20926            int from_bitField0_ = bitField0_;
20927            int to_bitField0_ = 0;
20928            if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
20929              to_bitField0_ |= 0x00000001;
20930            }
20931            if (jidBuilder_ == null) {
20932              result.jid_ = jid_;
20933            } else {
20934              result.jid_ = jidBuilder_.build();
20935            }
20936            if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
20937              to_bitField0_ |= 0x00000002;
20938            }
20939            result.sinceTxId_ = sinceTxId_;
20940            if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
20941              to_bitField0_ |= 0x00000004;
20942            }
20943            result.inProgressOk_ = inProgressOk_;
20944            result.bitField0_ = to_bitField0_;
20945            onBuilt();
20946            return result;
20947          }
20948    
20949          public Builder mergeFrom(com.google.protobuf.Message other) {
20950            if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto) {
20951              return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto)other);
20952            } else {
20953              super.mergeFrom(other);
20954              return this;
20955            }
20956          }
20957    
20958          public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto other) {
20959            if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto.getDefaultInstance()) return this;
20960            if (other.hasJid()) {
20961              mergeJid(other.getJid());
20962            }
20963            if (other.hasSinceTxId()) {
20964              setSinceTxId(other.getSinceTxId());
20965            }
20966            if (other.hasInProgressOk()) {
20967              setInProgressOk(other.getInProgressOk());
20968            }
20969            this.mergeUnknownFields(other.getUnknownFields());
20970            return this;
20971          }
20972    
20973          public final boolean isInitialized() {
20974            if (!hasJid()) {
20976              return false;
20977            }
20978            if (!hasSinceTxId()) {
20980              return false;
20981            }
20982            if (!getJid().isInitialized()) {
20984              return false;
20985            }
20986            return true;
20987          }
20988    
20989          public Builder mergeFrom(
20990              com.google.protobuf.CodedInputStream input,
20991              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
20992              throws java.io.IOException {
20993            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto parsedMessage = null;
20994            try {
20995              parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
20996            } catch (com.google.protobuf.InvalidProtocolBufferException e) {
20997              parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto) e.getUnfinishedMessage();
20998              throw e;
20999            } finally {
21000              if (parsedMessage != null) {
21001                mergeFrom(parsedMessage);
21002              }
21003            }
21004            return this;
21005          }
21006          private int bitField0_;
21007    
21008          // required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;
21009          private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
21010          private com.google.protobuf.SingleFieldBuilder<
21011              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder> jidBuilder_;
21012          /**
21013           * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
21014           */
21015          public boolean hasJid() {
21016            return ((bitField0_ & 0x00000001) == 0x00000001);
21017          }
21018          /**
21019           * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
21020           */
21021          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid() {
21022            if (jidBuilder_ == null) {
21023              return jid_;
21024            } else {
21025              return jidBuilder_.getMessage();
21026            }
21027          }
21028          /**
21029           * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
21030           */
21031          public Builder setJid(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto value) {
21032            if (jidBuilder_ == null) {
21033              if (value == null) {
21034                throw new NullPointerException();
21035              }
21036              jid_ = value;
21037              onChanged();
21038            } else {
21039              jidBuilder_.setMessage(value);
21040            }
21041            bitField0_ |= 0x00000001;
21042            return this;
21043          }
21044          /**
21045           * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
21046           */
21047          public Builder setJid(
21048              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder builderForValue) {
21049            if (jidBuilder_ == null) {
21050              jid_ = builderForValue.build();
21051              onChanged();
21052            } else {
21053              jidBuilder_.setMessage(builderForValue.build());
21054            }
21055            bitField0_ |= 0x00000001;
21056            return this;
21057          }
21058          /**
21059           * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
21060           */
21061          public Builder mergeJid(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto value) {
21062            if (jidBuilder_ == null) {
21063              if (((bitField0_ & 0x00000001) == 0x00000001) &&
21064                  jid_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance()) {
21065                jid_ =
21066                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.newBuilder(jid_).mergeFrom(value).buildPartial();
21067              } else {
21068                jid_ = value;
21069              }
21070              onChanged();
21071            } else {
21072              jidBuilder_.mergeFrom(value);
21073            }
21074            bitField0_ |= 0x00000001;
21075            return this;
21076          }
21077          /**
21078           * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
21079           */
21080          public Builder clearJid() {
21081            if (jidBuilder_ == null) {
21082              jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
21083              onChanged();
21084            } else {
21085              jidBuilder_.clear();
21086            }
21087            bitField0_ = (bitField0_ & ~0x00000001);
21088            return this;
21089          }
21090          /**
21091           * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
21092           */
21093          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder getJidBuilder() {
21094            bitField0_ |= 0x00000001;
21095            onChanged();
21096            return getJidFieldBuilder().getBuilder();
21097          }
21098          /**
21099           * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
21100           */
21101          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder() {
21102            if (jidBuilder_ != null) {
21103              return jidBuilder_.getMessageOrBuilder();
21104            } else {
21105              return jid_;
21106            }
21107          }
21108          /**
21109           * <code>required .hadoop.hdfs.qjournal.JournalIdProto jid = 1;</code>
21110           */
21111          private com.google.protobuf.SingleFieldBuilder<
21112              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder> 
21113              getJidFieldBuilder() {
21114            if (jidBuilder_ == null) {
21115              jidBuilder_ = new com.google.protobuf.SingleFieldBuilder<
21116                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder>(
21117                      jid_,
21118                      getParentForChildren(),
21119                      isClean());
21120              jid_ = null;
21121            }
21122            return jidBuilder_;
21123          }
21124    
21125          // required uint64 sinceTxId = 2;
21126          private long sinceTxId_ ;
21127          /**
21128           * <code>required uint64 sinceTxId = 2;</code>
21129           *
21130           * <pre>
21131           * Transaction ID
21132           * </pre>
21133           */
21134          public boolean hasSinceTxId() {
21135            return ((bitField0_ & 0x00000002) == 0x00000002);
21136          }
21137          /**
21138           * <code>required uint64 sinceTxId = 2;</code>
21139           *
21140           * <pre>
21141           * Transaction ID
21142           * </pre>
21143           */
21144          public long getSinceTxId() {
21145            return sinceTxId_;
21146          }
21147          /**
21148           * <code>required uint64 sinceTxId = 2;</code>
21149           *
21150           * <pre>
21151           * Transaction ID
21152           * </pre>
21153           */
21154          public Builder setSinceTxId(long value) {
21155            bitField0_ |= 0x00000002;
21156            sinceTxId_ = value;
21157            onChanged();
21158            return this;
21159          }
21160          /**
21161           * <code>required uint64 sinceTxId = 2;</code>
21162           *
21163           * <pre>
21164           * Transaction ID
21165           * </pre>
21166           */
21167          public Builder clearSinceTxId() {
21168            bitField0_ = (bitField0_ & ~0x00000002);
21169            sinceTxId_ = 0L;
21170            onChanged();
21171            return this;
21172          }
21173    
21174          // optional bool inProgressOk = 4 [default = false];
21175          private boolean inProgressOk_ ;
21176          /**
21177           * <code>optional bool inProgressOk = 4 [default = false];</code>
21178           *
21179           * <pre>
21180           * Whether or not the client will be reading from the returned streams.
21181           * optional bool forReading = 3 [default = true]; &lt;obsolete, do not reuse&gt;
21182           * </pre>
21183           */
21184          public boolean hasInProgressOk() {
21185            return ((bitField0_ & 0x00000004) == 0x00000004);
21186          }
21187          /**
21188           * <code>optional bool inProgressOk = 4 [default = false];</code>
21189           *
21190           * <pre>
21191           * Whether or not the client will be reading from the returned streams.
21192           * optional bool forReading = 3 [default = true]; &lt;obsolete, do not reuse&gt;
21193           * </pre>
21194           */
21195          public boolean getInProgressOk() {
21196            return inProgressOk_;
21197          }
21198          /**
21199           * <code>optional bool inProgressOk = 4 [default = false];</code>
21200           *
21201           * <pre>
21202           * Whether or not the client will be reading from the returned streams.
21203           * optional bool forReading = 3 [default = true]; &lt;obsolete, do not reuse&gt;
21204           * </pre>
21205           */
21206          public Builder setInProgressOk(boolean value) {
21207            bitField0_ |= 0x00000004;
21208            inProgressOk_ = value;
21209            onChanged();
21210            return this;
21211          }
21212          /**
21213           * <code>optional bool inProgressOk = 4 [default = false];</code>
21214           *
21215           * <pre>
21216           * Whether or not the client will be reading from the returned streams.
21217           * optional bool forReading = 3 [default = true]; &lt;obsolete, do not reuse&gt;
21218           * </pre>
21219           */
21220          public Builder clearInProgressOk() {
21221            bitField0_ = (bitField0_ & ~0x00000004);
21222            inProgressOk_ = false;
21223            onChanged();
21224            return this;
21225          }
21226    
21227          // @@protoc_insertion_point(builder_scope:hadoop.hdfs.qjournal.GetEditLogManifestRequestProto)
21228        }
21229    
21230        static {
21231          defaultInstance = new GetEditLogManifestRequestProto(true);
21232          defaultInstance.initFields();
21233        }
21234    
21235        // @@protoc_insertion_point(class_scope:hadoop.hdfs.qjournal.GetEditLogManifestRequestProto)
21236      }
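           // Editor's note: a minimal, hedged usage sketch for the request
           // message above; the journal id "jid-1" and transaction id 100L
           // are illustrative placeholders, not values taken from this file.
           // build() throws if a required field (jid, sinceTxId) is unset:
           //
           //   GetEditLogManifestRequestProto req =
           //       GetEditLogManifestRequestProto.newBuilder()
           //           .setJid(JournalIdProto.newBuilder()
           //               .setIdentifier("jid-1"))
           //           .setSinceTxId(100L)
           //           .setInProgressOk(true)
           //           .build();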
21237    
21238      public interface GetEditLogManifestResponseProtoOrBuilder
21239          extends com.google.protobuf.MessageOrBuilder {
21240    
21241        // required .hadoop.hdfs.RemoteEditLogManifestProto manifest = 1;
21242        /**
21243         * <code>required .hadoop.hdfs.RemoteEditLogManifestProto manifest = 1;</code>
21244         */
21245        boolean hasManifest();
21246        /**
21247         * <code>required .hadoop.hdfs.RemoteEditLogManifestProto manifest = 1;</code>
21248         */
21249        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto getManifest();
21250        /**
21251         * <code>required .hadoop.hdfs.RemoteEditLogManifestProto manifest = 1;</code>
21252         */
21253        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProtoOrBuilder getManifestOrBuilder();
21254    
21255        // required uint32 httpPort = 2;
21256        /**
21257         * <code>required uint32 httpPort = 2;</code>
21258         *
21259         * <pre>
21260         * Deprecated by fromURL
21261         * </pre>
21262         */
21263        boolean hasHttpPort();
21264        /**
21265         * <code>required uint32 httpPort = 2;</code>
21266         *
21267         * <pre>
21268         * Deprecated by fromURL
21269         * </pre>
21270         */
21271        int getHttpPort();
21272    
21273        // optional string fromURL = 3;
21274        /**
21275         * <code>optional string fromURL = 3;</code>
21276         */
21277        boolean hasFromURL();
21278        /**
21279         * <code>optional string fromURL = 3;</code>
21280         */
21281        java.lang.String getFromURL();
21282        /**
21283         * <code>optional string fromURL = 3;</code>
21284         */
21285        com.google.protobuf.ByteString
21286            getFromURLBytes();
21287      }
21288      /**
21289       * Protobuf type {@code hadoop.hdfs.qjournal.GetEditLogManifestResponseProto}
21290       */
21291      public static final class GetEditLogManifestResponseProto extends
21292          com.google.protobuf.GeneratedMessage
21293          implements GetEditLogManifestResponseProtoOrBuilder {
21294        // Use GetEditLogManifestResponseProto.newBuilder() to construct.
21295        private GetEditLogManifestResponseProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
21296          super(builder);
21297          this.unknownFields = builder.getUnknownFields();
21298        }
21299        private GetEditLogManifestResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
21300    
21301        private static final GetEditLogManifestResponseProto defaultInstance;
21302        public static GetEditLogManifestResponseProto getDefaultInstance() {
21303          return defaultInstance;
21304        }
21305    
21306        public GetEditLogManifestResponseProto getDefaultInstanceForType() {
21307          return defaultInstance;
21308        }
21309    
21310        private final com.google.protobuf.UnknownFieldSet unknownFields;
21311        @java.lang.Override
21312        public final com.google.protobuf.UnknownFieldSet
21313            getUnknownFields() {
21314          return this.unknownFields;
21315        }
21316        private GetEditLogManifestResponseProto(
21317            com.google.protobuf.CodedInputStream input,
21318            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
21319            throws com.google.protobuf.InvalidProtocolBufferException {
21320          initFields();
21321          int mutable_bitField0_ = 0;
21322          com.google.protobuf.UnknownFieldSet.Builder unknownFields =
21323              com.google.protobuf.UnknownFieldSet.newBuilder();
21324          try {
21325            boolean done = false;
21326            while (!done) {
21327              int tag = input.readTag();
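                   // Editor's note: each tag packs (fieldNumber << 3) | wireType,
                   // so in this switch 10 = field 1, length-delimited (manifest),
                   // 16 = field 2, varint (httpPort), 26 = field 3,
                   // length-delimited (fromURL), and 0 marks end of input.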
21328              switch (tag) {
21329                case 0:
21330                  done = true;
21331                  break;
21332                default: {
21333                  if (!parseUnknownField(input, unknownFields,
21334                                         extensionRegistry, tag)) {
21335                    done = true;
21336                  }
21337                  break;
21338                }
21339                case 10: {
21340                  org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto.Builder subBuilder = null;
21341                  if (((bitField0_ & 0x00000001) == 0x00000001)) {
21342                    subBuilder = manifest_.toBuilder();
21343                  }
21344                  manifest_ = input.readMessage(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto.PARSER, extensionRegistry);
21345                  if (subBuilder != null) {
21346                    subBuilder.mergeFrom(manifest_);
21347                    manifest_ = subBuilder.buildPartial();
21348                  }
21349                  bitField0_ |= 0x00000001;
21350                  break;
21351                }
21352                case 16: {
21353                  bitField0_ |= 0x00000002;
21354                  httpPort_ = input.readUInt32();
21355                  break;
21356                }
21357                case 26: {
21358                  bitField0_ |= 0x00000004;
21359                  fromURL_ = input.readBytes();
21360                  break;
21361                }
21362              }
21363            }
21364          } catch (com.google.protobuf.InvalidProtocolBufferException e) {
21365            throw e.setUnfinishedMessage(this);
21366          } catch (java.io.IOException e) {
21367            throw new com.google.protobuf.InvalidProtocolBufferException(
21368                e.getMessage()).setUnfinishedMessage(this);
21369          } finally {
21370            this.unknownFields = unknownFields.build();
21371            makeExtensionsImmutable();
21372          }
21373        }
21374        public static final com.google.protobuf.Descriptors.Descriptor
21375            getDescriptor() {
21376          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_GetEditLogManifestResponseProto_descriptor;
21377        }
21378    
21379        protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
21380            internalGetFieldAccessorTable() {
21381          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_GetEditLogManifestResponseProto_fieldAccessorTable
21382              .ensureFieldAccessorsInitialized(
21383                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto.Builder.class);
21384        }
21385    
21386        public static com.google.protobuf.Parser<GetEditLogManifestResponseProto> PARSER =
21387            new com.google.protobuf.AbstractParser<GetEditLogManifestResponseProto>() {
21388          public GetEditLogManifestResponseProto parsePartialFrom(
21389              com.google.protobuf.CodedInputStream input,
21390              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
21391              throws com.google.protobuf.InvalidProtocolBufferException {
21392            return new GetEditLogManifestResponseProto(input, extensionRegistry);
21393          }
21394        };
21395    
21396        @java.lang.Override
21397        public com.google.protobuf.Parser<GetEditLogManifestResponseProto> getParserForType() {
21398          return PARSER;
21399        }
21400    
21401        private int bitField0_;
21402        // required .hadoop.hdfs.RemoteEditLogManifestProto manifest = 1;
21403        public static final int MANIFEST_FIELD_NUMBER = 1;
21404        private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto manifest_;
21405        /**
21406         * <code>required .hadoop.hdfs.RemoteEditLogManifestProto manifest = 1;</code>
21407         */
21408        public boolean hasManifest() {
21409          return ((bitField0_ & 0x00000001) == 0x00000001);
21410        }
21411        /**
21412         * <code>required .hadoop.hdfs.RemoteEditLogManifestProto manifest = 1;</code>
21413         */
21414        public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto getManifest() {
21415          return manifest_;
21416        }
21417        /**
21418         * <code>required .hadoop.hdfs.RemoteEditLogManifestProto manifest = 1;</code>
21419         */
21420        public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProtoOrBuilder getManifestOrBuilder() {
21421          return manifest_;
21422        }
21423    
21424        // required uint32 httpPort = 2;
21425        public static final int HTTPPORT_FIELD_NUMBER = 2;
21426        private int httpPort_;
21427        /**
21428         * <code>required uint32 httpPort = 2;</code>
21429         *
21430         * <pre>
21431         * Deprecated by fromURL
21432         * </pre>
21433         */
21434        public boolean hasHttpPort() {
21435          return ((bitField0_ & 0x00000002) == 0x00000002);
21436        }
21437        /**
21438         * <code>required uint32 httpPort = 2;</code>
21439         *
21440         * <pre>
21441         * Deprecated by fromURL
21442         * </pre>
21443         */
21444        public int getHttpPort() {
21445          return httpPort_;
21446        }
21447    
21448        // optional string fromURL = 3;
21449        public static final int FROMURL_FIELD_NUMBER = 3;
21450        private java.lang.Object fromURL_;
21451        /**
21452         * <code>optional string fromURL = 3;</code>
21453         */
21454        public boolean hasFromURL() {
21455          return ((bitField0_ & 0x00000004) == 0x00000004);
21456        }
21457        /**
21458         * <code>optional string fromURL = 3;</code>
21459         */
21460        public java.lang.String getFromURL() {
21461          java.lang.Object ref = fromURL_;
21462          if (ref instanceof java.lang.String) {
21463            return (java.lang.String) ref;
21464          } else {
21465            com.google.protobuf.ByteString bs = 
21466                (com.google.protobuf.ByteString) ref;
21467            java.lang.String s = bs.toStringUtf8();
21468            if (bs.isValidUtf8()) {
21469              fromURL_ = s;
21470            }
21471            return s;
21472          }
21473        }
21474        /**
21475         * <code>optional string fromURL = 3;</code>
21476         */
21477        public com.google.protobuf.ByteString
21478            getFromURLBytes() {
21479          java.lang.Object ref = fromURL_;
21480          if (ref instanceof java.lang.String) {
21481            com.google.protobuf.ByteString b = 
21482                com.google.protobuf.ByteString.copyFromUtf8(
21483                    (java.lang.String) ref);
21484            fromURL_ = b;
21485            return b;
21486          } else {
21487            return (com.google.protobuf.ByteString) ref;
21488          }
21489        }
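             // Editor's note: fromURL_ holds either a String or a ByteString.
             // getFromURL() decodes the UTF-8 bytes on first access and, when
             // the bytes are valid UTF-8, caches the decoded String back into
             // fromURL_; getFromURLBytes() does the reverse, caching the
             // encoded ByteString. Either way the conversion runs at most once.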
21490    
21491        private void initFields() {
21492          manifest_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto.getDefaultInstance();
21493          httpPort_ = 0;
21494          fromURL_ = "";
21495        }
21496        private byte memoizedIsInitialized = -1;
21497        public final boolean isInitialized() {
21498          byte isInitialized = memoizedIsInitialized;
21499          if (isInitialized != -1) return isInitialized == 1;
21500    
21501          if (!hasManifest()) {
21502            memoizedIsInitialized = 0;
21503            return false;
21504          }
21505          if (!hasHttpPort()) {
21506            memoizedIsInitialized = 0;
21507            return false;
21508          }
21509          if (!getManifest().isInitialized()) {
21510            memoizedIsInitialized = 0;
21511            return false;
21512          }
21513          memoizedIsInitialized = 1;
21514          return true;
21515        }
21516    
21517        public void writeTo(com.google.protobuf.CodedOutputStream output)
21518                            throws java.io.IOException {
21519          getSerializedSize();
21520          if (((bitField0_ & 0x00000001) == 0x00000001)) {
21521            output.writeMessage(1, manifest_);
21522          }
21523          if (((bitField0_ & 0x00000002) == 0x00000002)) {
21524            output.writeUInt32(2, httpPort_);
21525          }
21526          if (((bitField0_ & 0x00000004) == 0x00000004)) {
21527            output.writeBytes(3, getFromURLBytes());
21528          }
21529          getUnknownFields().writeTo(output);
21530        }
21531    
21532        private int memoizedSerializedSize = -1;
21533        public int getSerializedSize() {
21534          int size = memoizedSerializedSize;
21535          if (size != -1) return size;
21536    
21537          size = 0;
21538          if (((bitField0_ & 0x00000001) == 0x00000001)) {
21539            size += com.google.protobuf.CodedOutputStream
21540              .computeMessageSize(1, manifest_);
21541          }
21542          if (((bitField0_ & 0x00000002) == 0x00000002)) {
21543            size += com.google.protobuf.CodedOutputStream
21544              .computeUInt32Size(2, httpPort_);
21545          }
21546          if (((bitField0_ & 0x00000004) == 0x00000004)) {
21547            size += com.google.protobuf.CodedOutputStream
21548              .computeBytesSize(3, getFromURLBytes());
21549          }
21550          size += getUnknownFields().getSerializedSize();
21551          memoizedSerializedSize = size;
21552          return size;
21553        }
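             // Editor's note: writeTo() above calls getSerializedSize() first
             // so that the memoized size of this message (and, recursively, of
             // nested messages) is populated; writeMessage(1, manifest_) uses
             // that cached size to emit the length prefix of the
             // length-delimited field without recomputing it.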
21554    
21555        private static final long serialVersionUID = 0L;
21556        @java.lang.Override
21557        protected java.lang.Object writeReplace()
21558            throws java.io.ObjectStreamException {
21559          return super.writeReplace();
21560        }
21561    
21562        @java.lang.Override
21563        public boolean equals(final java.lang.Object obj) {
21564          if (obj == this) {
21565            return true;
21566          }
21567          if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto)) {
21568            return super.equals(obj);
21569          }
21570          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto) obj;
21571    
21572          boolean result = true;
21573          result = result && (hasManifest() == other.hasManifest());
21574          if (hasManifest()) {
21575            result = result && getManifest()
21576                .equals(other.getManifest());
21577          }
21578          result = result && (hasHttpPort() == other.hasHttpPort());
21579          if (hasHttpPort()) {
21580            result = result && (getHttpPort()
21581                == other.getHttpPort());
21582          }
21583          result = result && (hasFromURL() == other.hasFromURL());
21584          if (hasFromURL()) {
21585            result = result && getFromURL()
21586                .equals(other.getFromURL());
21587          }
21588          result = result &&
21589              getUnknownFields().equals(other.getUnknownFields());
21590          return result;
21591        }
21592    
21593        private int memoizedHashCode = 0;
21594        @java.lang.Override
21595        public int hashCode() {
21596          if (memoizedHashCode != 0) {
21597            return memoizedHashCode;
21598          }
21599          int hash = 41;
21600          hash = (19 * hash) + getDescriptorForType().hashCode();
21601          if (hasManifest()) {
21602            hash = (37 * hash) + MANIFEST_FIELD_NUMBER;
21603            hash = (53 * hash) + getManifest().hashCode();
21604          }
21605          if (hasHttpPort()) {
21606            hash = (37 * hash) + HTTPPORT_FIELD_NUMBER;
21607            hash = (53 * hash) + getHttpPort();
21608          }
21609          if (hasFromURL()) {
21610            hash = (37 * hash) + FROMURL_FIELD_NUMBER;
21611            hash = (53 * hash) + getFromURL().hashCode();
21612          }
21613          hash = (29 * hash) + getUnknownFields().hashCode();
21614          memoizedHashCode = hash;
21615          return hash;
21616        }
21617    
21618        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto parseFrom(
21619            com.google.protobuf.ByteString data)
21620            throws com.google.protobuf.InvalidProtocolBufferException {
21621          return PARSER.parseFrom(data);
21622        }
21623        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto parseFrom(
21624            com.google.protobuf.ByteString data,
21625            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
21626            throws com.google.protobuf.InvalidProtocolBufferException {
21627          return PARSER.parseFrom(data, extensionRegistry);
21628        }
21629        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto parseFrom(byte[] data)
21630            throws com.google.protobuf.InvalidProtocolBufferException {
21631          return PARSER.parseFrom(data);
21632        }
21633        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto parseFrom(
21634            byte[] data,
21635            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
21636            throws com.google.protobuf.InvalidProtocolBufferException {
21637          return PARSER.parseFrom(data, extensionRegistry);
21638        }
21639        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto parseFrom(java.io.InputStream input)
21640            throws java.io.IOException {
21641          return PARSER.parseFrom(input);
21642        }
21643        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto parseFrom(
21644            java.io.InputStream input,
21645            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
21646            throws java.io.IOException {
21647          return PARSER.parseFrom(input, extensionRegistry);
21648        }
21649        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto parseDelimitedFrom(java.io.InputStream input)
21650            throws java.io.IOException {
21651          return PARSER.parseDelimitedFrom(input);
21652        }
21653        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto parseDelimitedFrom(
21654            java.io.InputStream input,
21655            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
21656            throws java.io.IOException {
21657          return PARSER.parseDelimitedFrom(input, extensionRegistry);
21658        }
21659        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto parseFrom(
21660            com.google.protobuf.CodedInputStream input)
21661            throws java.io.IOException {
21662          return PARSER.parseFrom(input);
21663        }
21664        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto parseFrom(
21665            com.google.protobuf.CodedInputStream input,
21666            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
21667            throws java.io.IOException {
21668          return PARSER.parseFrom(input, extensionRegistry);
21669        }
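             // Editor's note: a hedged round-trip sketch using the parsers
             // above together with toByteArray() from the protobuf runtime
             // ("resp" stands for a hypothetical fully-initialized instance):
             //
             //   byte[] wire = resp.toByteArray();
             //   GetEditLogManifestResponseProto copy =
             //       GetEditLogManifestResponseProto.parseFrom(wire);
             //
             // The delimited variants length-prefix each message, so several
             // messages written back-to-back on one stream (writeDelimitedTo)
             // can be read back one at a time with parseDelimitedFrom.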
21670    
21671        public static Builder newBuilder() { return Builder.create(); }
21672        public Builder newBuilderForType() { return newBuilder(); }
21673        public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto prototype) {
21674          return newBuilder().mergeFrom(prototype);
21675        }
21676        public Builder toBuilder() { return newBuilder(this); }
21677    
21678        @java.lang.Override
21679        protected Builder newBuilderForType(
21680            com.google.protobuf.GeneratedMessage.BuilderParent parent) {
21681          Builder builder = new Builder(parent);
21682          return builder;
21683        }
21684        /**
21685         * Protobuf type {@code hadoop.hdfs.qjournal.GetEditLogManifestResponseProto}
21686         */
21687        public static final class Builder extends
21688            com.google.protobuf.GeneratedMessage.Builder<Builder>
21689           implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProtoOrBuilder {
21690          public static final com.google.protobuf.Descriptors.Descriptor
21691              getDescriptor() {
21692            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_GetEditLogManifestResponseProto_descriptor;
21693          }
21694    
21695          protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
21696              internalGetFieldAccessorTable() {
21697            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_GetEditLogManifestResponseProto_fieldAccessorTable
21698                .ensureFieldAccessorsInitialized(
21699                    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto.Builder.class);
21700          }
21701    
21702          // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto.newBuilder()
21703          private Builder() {
21704            maybeForceBuilderInitialization();
21705          }
21706    
21707          private Builder(
21708              com.google.protobuf.GeneratedMessage.BuilderParent parent) {
21709            super(parent);
21710            maybeForceBuilderInitialization();
21711          }
21712          private void maybeForceBuilderInitialization() {
21713            if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
21714              getManifestFieldBuilder();
21715            }
21716          }
21717          private static Builder create() {
21718            return new Builder();
21719          }
21720    
21721          public Builder clear() {
21722            super.clear();
21723            if (manifestBuilder_ == null) {
21724              manifest_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto.getDefaultInstance();
21725            } else {
21726              manifestBuilder_.clear();
21727            }
21728            bitField0_ = (bitField0_ & ~0x00000001);
21729            httpPort_ = 0;
21730            bitField0_ = (bitField0_ & ~0x00000002);
21731            fromURL_ = "";
21732            bitField0_ = (bitField0_ & ~0x00000004);
21733            return this;
21734          }
21735    
21736          public Builder clone() {
21737            return create().mergeFrom(buildPartial());
21738          }
21739    
21740          public com.google.protobuf.Descriptors.Descriptor
21741              getDescriptorForType() {
21742            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_GetEditLogManifestResponseProto_descriptor;
21743          }
21744    
21745          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto getDefaultInstanceForType() {
21746            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto.getDefaultInstance();
21747          }
21748    
21749          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto build() {
21750            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto result = buildPartial();
21751            if (!result.isInitialized()) {
21752              throw newUninitializedMessageException(result);
21753            }
21754            return result;
21755          }
21756    
21757          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto buildPartial() {
21758            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto(this);
21759            int from_bitField0_ = bitField0_;
21760            int to_bitField0_ = 0;
21761            if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
21762              to_bitField0_ |= 0x00000001;
21763            }
21764            if (manifestBuilder_ == null) {
21765              result.manifest_ = manifest_;
21766            } else {
21767              result.manifest_ = manifestBuilder_.build();
21768            }
21769            if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
21770              to_bitField0_ |= 0x00000002;
21771            }
21772            result.httpPort_ = httpPort_;
21773            if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
21774              to_bitField0_ |= 0x00000004;
21775            }
21776            result.fromURL_ = fromURL_;
21777            result.bitField0_ = to_bitField0_;
21778            onBuilt();
21779            return result;
21780          }
21781    
21782          public Builder mergeFrom(com.google.protobuf.Message other) {
21783            if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto) {
21784              return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto)other);
21785            } else {
21786              super.mergeFrom(other);
21787              return this;
21788            }
21789          }
21790    
21791          public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto other) {
21792            if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto.getDefaultInstance()) return this;
21793            if (other.hasManifest()) {
21794              mergeManifest(other.getManifest());
21795            }
21796            if (other.hasHttpPort()) {
21797              setHttpPort(other.getHttpPort());
21798            }
21799            if (other.hasFromURL()) {
21800              bitField0_ |= 0x00000004;
21801              fromURL_ = other.fromURL_;
21802              onChanged();
21803            }
21804            this.mergeUnknownFields(other.getUnknownFields());
21805            return this;
21806          }
21807    
21808          public final boolean isInitialized() {
21809            if (!hasManifest()) {
21811              return false;
21812            }
21813            if (!hasHttpPort()) {
21815              return false;
21816            }
21817            if (!getManifest().isInitialized()) {
21819              return false;
21820            }
21821            return true;
21822          }
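               // Editor's note: build() applies the same required-field check
               // and throws an UninitializedMessageException when manifest or
               // httpPort is unset, while buildPartial() skips validation. An
               // illustrative sketch (8480 is a placeholder port, not a value
               // from this file):
               //
               //   Builder b = GetEditLogManifestResponseProto.newBuilder()
               //       .setHttpPort(8480);  // manifest still unset
               //   b.isInitialized();       // false
               //   b.buildPartial();        // succeeds, message left partial
               //   // b.build();            // would throw: manifest missing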
21823    
21824          public Builder mergeFrom(
21825              com.google.protobuf.CodedInputStream input,
21826              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
21827              throws java.io.IOException {
21828            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto parsedMessage = null;
21829            try {
21830              parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
21831            } catch (com.google.protobuf.InvalidProtocolBufferException e) {
21832              parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto) e.getUnfinishedMessage();
21833              throw e;
21834            } finally {
21835              if (parsedMessage != null) {
21836                mergeFrom(parsedMessage);
21837              }
21838            }
21839            return this;
21840          }
21841          private int bitField0_;
21842    
21843          // required .hadoop.hdfs.RemoteEditLogManifestProto manifest = 1;
21844          private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto manifest_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto.getDefaultInstance();
21845          private com.google.protobuf.SingleFieldBuilder<
21846              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProtoOrBuilder> manifestBuilder_;
21847          /**
21848           * <code>required .hadoop.hdfs.RemoteEditLogManifestProto manifest = 1;</code>
21849           */
21850          public boolean hasManifest() {
21851            return ((bitField0_ & 0x00000001) == 0x00000001);
21852          }
21853          /**
21854           * <code>required .hadoop.hdfs.RemoteEditLogManifestProto manifest = 1;</code>
21855           */
21856          public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto getManifest() {
21857            if (manifestBuilder_ == null) {
21858              return manifest_;
21859            } else {
21860              return manifestBuilder_.getMessage();
21861            }
21862          }
21863          /**
21864           * <code>required .hadoop.hdfs.RemoteEditLogManifestProto manifest = 1;</code>
21865           */
21866          public Builder setManifest(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto value) {
21867            if (manifestBuilder_ == null) {
21868              if (value == null) {
21869                throw new NullPointerException();
21870              }
21871              manifest_ = value;
21872              onChanged();
21873            } else {
21874              manifestBuilder_.setMessage(value);
21875            }
21876            bitField0_ |= 0x00000001;
21877            return this;
21878          }
21879          /**
21880           * <code>required .hadoop.hdfs.RemoteEditLogManifestProto manifest = 1;</code>
21881           */
21882          public Builder setManifest(
21883              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto.Builder builderForValue) {
21884            if (manifestBuilder_ == null) {
21885              manifest_ = builderForValue.build();
21886              onChanged();
21887            } else {
21888              manifestBuilder_.setMessage(builderForValue.build());
21889            }
21890            bitField0_ |= 0x00000001;
21891            return this;
21892          }
21893          /**
21894           * <code>required .hadoop.hdfs.RemoteEditLogManifestProto manifest = 1;</code>
21895           */
21896          public Builder mergeManifest(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto value) {
21897            if (manifestBuilder_ == null) {
21898              if (((bitField0_ & 0x00000001) == 0x00000001) &&
21899                  manifest_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto.getDefaultInstance()) {
21900                manifest_ =
21901                  org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto.newBuilder(manifest_).mergeFrom(value).buildPartial();
21902              } else {
21903                manifest_ = value;
21904              }
21905              onChanged();
21906            } else {
21907              manifestBuilder_.mergeFrom(value);
21908            }
21909            bitField0_ |= 0x00000001;
21910            return this;
21911          }
21912          /**
21913           * <code>required .hadoop.hdfs.RemoteEditLogManifestProto manifest = 1;</code>
21914           */
21915          public Builder clearManifest() {
21916            if (manifestBuilder_ == null) {
21917              manifest_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto.getDefaultInstance();
21918              onChanged();
21919            } else {
21920              manifestBuilder_.clear();
21921            }
21922            bitField0_ = (bitField0_ & ~0x00000001);
21923            return this;
21924          }
21925          /**
21926           * <code>required .hadoop.hdfs.RemoteEditLogManifestProto manifest = 1;</code>
21927           */
21928          public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto.Builder getManifestBuilder() {
21929            bitField0_ |= 0x00000001;
21930            onChanged();
21931            return getManifestFieldBuilder().getBuilder();
21932          }
21933          /**
21934           * <code>required .hadoop.hdfs.RemoteEditLogManifestProto manifest = 1;</code>
21935           */
21936          public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProtoOrBuilder getManifestOrBuilder() {
21937            if (manifestBuilder_ != null) {
21938              return manifestBuilder_.getMessageOrBuilder();
21939            } else {
21940              return manifest_;
21941            }
21942          }
21943          /**
21944           * <code>required .hadoop.hdfs.RemoteEditLogManifestProto manifest = 1;</code>
21945           */
21946          private com.google.protobuf.SingleFieldBuilder<
21947              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProtoOrBuilder> 
21948              getManifestFieldBuilder() {
21949            if (manifestBuilder_ == null) {
21950              manifestBuilder_ = new com.google.protobuf.SingleFieldBuilder<
21951                  org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProtoOrBuilder>(
21952                      manifest_,
21953                      getParentForChildren(),
21954                      isClean());
21955              manifest_ = null;
21956            }
21957            return manifestBuilder_;
21958          }
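               // Editor's note: the SingleFieldBuilder above is created
               // lazily. The field value lives in manifest_ until the builder
               // view is first forced (by getManifestBuilder(), or at
               // construction when alwaysUseFieldBuilders is set); from then
               // on manifest_ is nulled and the SingleFieldBuilder owns the
               // value. A sketch of in-place nested editing, assuming the
               // usual generated mutators on RemoteEditLogManifestProto
               // ("log" is a hypothetical RemoteEditLogProto):
               //
               //   GetEditLogManifestResponseProto.Builder b =
               //       GetEditLogManifestResponseProto.newBuilder();
               //   b.getManifestBuilder().addLogs(log);
               //   // the nested edit is visible through b.getManifest()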
21959    
21960          // required uint32 httpPort = 2;
21961          private int httpPort_ ;
21962          /**
21963           * <code>required uint32 httpPort = 2;</code>
21964           *
21965           * <pre>
21966           * Deprecated by fromURL
21967           * </pre>
21968           */
21969          public boolean hasHttpPort() {
21970            return ((bitField0_ & 0x00000002) == 0x00000002);
21971          }
21972          /**
21973           * <code>required uint32 httpPort = 2;</code>
21974           *
21975           * <pre>
21976           * Deprecated by fromURL
21977           * </pre>
21978           */
21979          public int getHttpPort() {
21980            return httpPort_;
21981          }
21982          /**
21983           * <code>required uint32 httpPort = 2;</code>
21984           *
21985           * <pre>
21986           * Deprecated by fromURL
21987           * </pre>
21988           */
21989          public Builder setHttpPort(int value) {
21990            bitField0_ |= 0x00000002;
21991            httpPort_ = value;
21992            onChanged();
21993            return this;
21994          }
21995          /**
21996           * <code>required uint32 httpPort = 2;</code>
21997           *
21998           * <pre>
21999           * Deprecated by fromURL
22000           * </pre>
22001           */
22002          public Builder clearHttpPort() {
22003            bitField0_ = (bitField0_ & ~0x00000002);
22004            httpPort_ = 0;
22005            onChanged();
22006            return this;
22007          }
22008    
22009          // optional string fromURL = 3;
22010          private java.lang.Object fromURL_ = "";
22011          /**
22012           * <code>optional string fromURL = 3;</code>
22013           */
22014          public boolean hasFromURL() {
22015            return ((bitField0_ & 0x00000004) == 0x00000004);
22016          }
22017          /**
22018           * <code>optional string fromURL = 3;</code>
22019           */
22020          public java.lang.String getFromURL() {
22021            java.lang.Object ref = fromURL_;
22022            if (!(ref instanceof java.lang.String)) {
22023              java.lang.String s = ((com.google.protobuf.ByteString) ref)
22024                  .toStringUtf8();
22025              fromURL_ = s;
22026              return s;
22027            } else {
22028              return (java.lang.String) ref;
22029            }
22030          }
22031          /**
22032           * <code>optional string fromURL = 3;</code>
22033           */
22034          public com.google.protobuf.ByteString
22035              getFromURLBytes() {
22036            java.lang.Object ref = fromURL_;
22037            if (ref instanceof java.lang.String) {
22038              com.google.protobuf.ByteString b = 
22039                  com.google.protobuf.ByteString.copyFromUtf8(
22040                      (java.lang.String) ref);
22041              fromURL_ = b;
22042              return b;
22043            } else {
22044              return (com.google.protobuf.ByteString) ref;
22045            }
22046          }
22047          /**
22048           * <code>optional string fromURL = 3;</code>
22049           */
22050          public Builder setFromURL(
22051              java.lang.String value) {
22052            if (value == null) {
22053              throw new NullPointerException();
22054            }
22055            bitField0_ |= 0x00000004;
22056            fromURL_ = value;
22057            onChanged();
22058            return this;
22059          }
22060          /**
22061           * <code>optional string fromURL = 3;</code>
22062           */
22063          public Builder clearFromURL() {
22064            bitField0_ = (bitField0_ & ~0x00000004);
22065            fromURL_ = getDefaultInstance().getFromURL();
22066            onChanged();
22067            return this;
22068          }
22069          /**
22070           * <code>optional string fromURL = 3;</code>
22071           */
22072          public Builder setFromURLBytes(
22073              com.google.protobuf.ByteString value) {
22074            if (value == null) {
22075              throw new NullPointerException();
22076            }
22077            bitField0_ |= 0x00000004;
22078            fromURL_ = value;
22079            onChanged();
22080            return this;
22081          }
22082    
22083          // @@protoc_insertion_point(builder_scope:hadoop.hdfs.qjournal.GetEditLogManifestResponseProto)
22084        }
22085    
22086        static {
22087          defaultInstance = new GetEditLogManifestResponseProto(true);
22088          defaultInstance.initFields();
22089        }
22090    
22091        // @@protoc_insertion_point(class_scope:hadoop.hdfs.qjournal.GetEditLogManifestResponseProto)
22092      }
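           // Editor's note: a hedged sketch of consuming a parsed response
           // ("data" stands in for wire bytes produced elsewhere). The Javadoc
           // above marks httpPort as deprecated by fromURL, so a reader should
           // prefer the URL when present ("host" is hypothetical):
           //
           //   GetEditLogManifestResponseProto resp =
           //       GetEditLogManifestResponseProto.parseFrom(data);
           //   String source = resp.hasFromURL()
           //       ? resp.getFromURL()
           //       : ("http://" + host + ":" + resp.getHttpPort());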
22093    
22094      public interface PrepareRecoveryRequestProtoOrBuilder
22095          extends com.google.protobuf.MessageOrBuilder {
22096    
22097        // required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1;
22098        /**
22099         * <code>required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1;</code>
22100         */
22101        boolean hasReqInfo();
22102        /**
22103         * <code>required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1;</code>
22104         */
22105        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo();
22106        /**
22107         * <code>required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1;</code>
22108         */
22109        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder();
22110    
22111        // required uint64 segmentTxId = 2;
22112        /**
22113         * <code>required uint64 segmentTxId = 2;</code>
22114         */
22115        boolean hasSegmentTxId();
22116        /**
22117         * <code>required uint64 segmentTxId = 2;</code>
22118         */
22119        long getSegmentTxId();
22120      }
22121      /**
22122       * Protobuf type {@code hadoop.hdfs.qjournal.PrepareRecoveryRequestProto}
22123       *
22124       * <pre>
22125       **
22126       * prepareRecovery()
22127       * </pre>
22128       */
22129      public static final class PrepareRecoveryRequestProto extends
22130          com.google.protobuf.GeneratedMessage
22131          implements PrepareRecoveryRequestProtoOrBuilder {
22132        // Use PrepareRecoveryRequestProto.newBuilder() to construct.
22133        private PrepareRecoveryRequestProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
22134          super(builder);
22135          this.unknownFields = builder.getUnknownFields();
22136        }
22137        private PrepareRecoveryRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
22138    
22139        private static final PrepareRecoveryRequestProto defaultInstance;
22140        public static PrepareRecoveryRequestProto getDefaultInstance() {
22141          return defaultInstance;
22142        }
22143    
22144        public PrepareRecoveryRequestProto getDefaultInstanceForType() {
22145          return defaultInstance;
22146        }
22147    
22148        private final com.google.protobuf.UnknownFieldSet unknownFields;
22149        @java.lang.Override
22150        public final com.google.protobuf.UnknownFieldSet
22151            getUnknownFields() {
22152          return this.unknownFields;
22153        }
22154        private PrepareRecoveryRequestProto(
22155            com.google.protobuf.CodedInputStream input,
22156            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
22157            throws com.google.protobuf.InvalidProtocolBufferException {
22158          initFields();
22159          int mutable_bitField0_ = 0;
22160          com.google.protobuf.UnknownFieldSet.Builder unknownFields =
22161              com.google.protobuf.UnknownFieldSet.newBuilder();
22162          try {
22163            boolean done = false;
22164            while (!done) {
22165              int tag = input.readTag();
22166              switch (tag) {
22167                case 0:
22168                  done = true;
22169                  break;
22170                default: {
22171                  if (!parseUnknownField(input, unknownFields,
22172                                         extensionRegistry, tag)) {
22173                    done = true;
22174                  }
22175                  break;
22176                }
22177                case 10: {
22178                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder subBuilder = null;
22179                  if (((bitField0_ & 0x00000001) == 0x00000001)) {
22180                    subBuilder = reqInfo_.toBuilder();
22181                  }
22182                  reqInfo_ = input.readMessage(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.PARSER, extensionRegistry);
22183                  if (subBuilder != null) {
22184                    subBuilder.mergeFrom(reqInfo_);
22185                    reqInfo_ = subBuilder.buildPartial();
22186                  }
22187                  bitField0_ |= 0x00000001;
22188                  break;
22189                }
22190                case 16: {
22191                  bitField0_ |= 0x00000002;
22192                  segmentTxId_ = input.readUInt64();
22193                  break;
22194                }
22195              }
22196            }
22197          } catch (com.google.protobuf.InvalidProtocolBufferException e) {
22198            throw e.setUnfinishedMessage(this);
22199          } catch (java.io.IOException e) {
22200            throw new com.google.protobuf.InvalidProtocolBufferException(
22201                e.getMessage()).setUnfinishedMessage(this);
22202          } finally {
22203            this.unknownFields = unknownFields.build();
22204            makeExtensionsImmutable();
22205          }
22206        }
22207        public static final com.google.protobuf.Descriptors.Descriptor
22208            getDescriptor() {
22209          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_PrepareRecoveryRequestProto_descriptor;
22210        }
22211    
22212        protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
22213            internalGetFieldAccessorTable() {
22214          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_PrepareRecoveryRequestProto_fieldAccessorTable
22215              .ensureFieldAccessorsInitialized(
22216                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto.Builder.class);
22217        }
22218    
22219        public static com.google.protobuf.Parser<PrepareRecoveryRequestProto> PARSER =
22220            new com.google.protobuf.AbstractParser<PrepareRecoveryRequestProto>() {
22221          public PrepareRecoveryRequestProto parsePartialFrom(
22222              com.google.protobuf.CodedInputStream input,
22223              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
22224              throws com.google.protobuf.InvalidProtocolBufferException {
22225            return new PrepareRecoveryRequestProto(input, extensionRegistry);
22226          }
22227        };
22228    
22229        @java.lang.Override
22230        public com.google.protobuf.Parser<PrepareRecoveryRequestProto> getParserForType() {
22231          return PARSER;
22232        }
22233    
22234        private int bitField0_;
22235        // required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1;
22236        public static final int REQINFO_FIELD_NUMBER = 1;
22237        private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto reqInfo_;
22238        /**
22239         * <code>required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1;</code>
22240         */
22241        public boolean hasReqInfo() {
22242          return ((bitField0_ & 0x00000001) == 0x00000001);
22243        }
22244        /**
22245         * <code>required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1;</code>
22246         */
22247        public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo() {
22248          return reqInfo_;
22249        }
22250        /**
22251         * <code>required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1;</code>
22252         */
22253        public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder() {
22254          return reqInfo_;
22255        }
22256    
22257        // required uint64 segmentTxId = 2;
22258        public static final int SEGMENTTXID_FIELD_NUMBER = 2;
22259        private long segmentTxId_;
22260        /**
22261         * <code>required uint64 segmentTxId = 2;</code>
22262         */
22263        public boolean hasSegmentTxId() {
22264          return ((bitField0_ & 0x00000002) == 0x00000002);
22265        }
22266        /**
22267         * <code>required uint64 segmentTxId = 2;</code>
22268         */
22269        public long getSegmentTxId() {
22270          return segmentTxId_;
22271        }
22272    
22273        private void initFields() {
22274          reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
22275          segmentTxId_ = 0L;
22276        }
22277        private byte memoizedIsInitialized = -1;
22278        public final boolean isInitialized() {
22279          byte isInitialized = memoizedIsInitialized;
22280          if (isInitialized != -1) return isInitialized == 1;
22281    
22282          if (!hasReqInfo()) {
22283            memoizedIsInitialized = 0;
22284            return false;
22285          }
22286          if (!hasSegmentTxId()) {
22287            memoizedIsInitialized = 0;
22288            return false;
22289          }
22290          if (!getReqInfo().isInitialized()) {
22291            memoizedIsInitialized = 0;
22292            return false;
22293          }
22294          memoizedIsInitialized = 1;
22295          return true;
22296        }
22297    
22298        public void writeTo(com.google.protobuf.CodedOutputStream output)
22299                            throws java.io.IOException {
22300          getSerializedSize();
22301          if (((bitField0_ & 0x00000001) == 0x00000001)) {
22302            output.writeMessage(1, reqInfo_);
22303          }
22304          if (((bitField0_ & 0x00000002) == 0x00000002)) {
22305            output.writeUInt64(2, segmentTxId_);
22306          }
22307          getUnknownFields().writeTo(output);
22308        }
22309    
22310        private int memoizedSerializedSize = -1;
22311        public int getSerializedSize() {
22312          int size = memoizedSerializedSize;
22313          if (size != -1) return size;
22314    
22315          size = 0;
22316          if (((bitField0_ & 0x00000001) == 0x00000001)) {
22317            size += com.google.protobuf.CodedOutputStream
22318              .computeMessageSize(1, reqInfo_);
22319          }
22320          if (((bitField0_ & 0x00000002) == 0x00000002)) {
22321            size += com.google.protobuf.CodedOutputStream
22322              .computeUInt64Size(2, segmentTxId_);
22323          }
22324          size += getUnknownFields().getSerializedSize();
22325          memoizedSerializedSize = size;
22326          return size;
22327        }
22328    
22329        private static final long serialVersionUID = 0L;
22330        @java.lang.Override
22331        protected java.lang.Object writeReplace()
22332            throws java.io.ObjectStreamException {
22333          return super.writeReplace();
22334        }
22335    
22336        @java.lang.Override
22337        public boolean equals(final java.lang.Object obj) {
22338          if (obj == this) {
22339            return true;
22340          }
22341          if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto)) {
22342            return super.equals(obj);
22343          }
22344          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto) obj;
22345    
22346          boolean result = true;
22347          result = result && (hasReqInfo() == other.hasReqInfo());
22348          if (hasReqInfo()) {
22349            result = result && getReqInfo()
22350                .equals(other.getReqInfo());
22351          }
22352          result = result && (hasSegmentTxId() == other.hasSegmentTxId());
22353          if (hasSegmentTxId()) {
22354            result = result && (getSegmentTxId()
22355                == other.getSegmentTxId());
22356          }
22357          result = result &&
22358              getUnknownFields().equals(other.getUnknownFields());
22359          return result;
22360        }
22361    
22362        private int memoizedHashCode = 0;
22363        @java.lang.Override
22364        public int hashCode() {
22365          if (memoizedHashCode != 0) {
22366            return memoizedHashCode;
22367          }
22368          int hash = 41;
22369          hash = (19 * hash) + getDescriptorForType().hashCode();
22370          if (hasReqInfo()) {
22371            hash = (37 * hash) + REQINFO_FIELD_NUMBER;
22372            hash = (53 * hash) + getReqInfo().hashCode();
22373          }
22374          if (hasSegmentTxId()) {
22375            hash = (37 * hash) + SEGMENTTXID_FIELD_NUMBER;
22376            hash = (53 * hash) + hashLong(getSegmentTxId());
22377          }
22378          hash = (29 * hash) + getUnknownFields().hashCode();
22379          memoizedHashCode = hash;
22380          return hash;
22381        }
22382    
22383        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto parseFrom(
22384            com.google.protobuf.ByteString data)
22385            throws com.google.protobuf.InvalidProtocolBufferException {
22386          return PARSER.parseFrom(data);
22387        }
22388        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto parseFrom(
22389            com.google.protobuf.ByteString data,
22390            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
22391            throws com.google.protobuf.InvalidProtocolBufferException {
22392          return PARSER.parseFrom(data, extensionRegistry);
22393        }
22394        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto parseFrom(byte[] data)
22395            throws com.google.protobuf.InvalidProtocolBufferException {
22396          return PARSER.parseFrom(data);
22397        }
22398        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto parseFrom(
22399            byte[] data,
22400            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
22401            throws com.google.protobuf.InvalidProtocolBufferException {
22402          return PARSER.parseFrom(data, extensionRegistry);
22403        }
22404        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto parseFrom(java.io.InputStream input)
22405            throws java.io.IOException {
22406          return PARSER.parseFrom(input);
22407        }
22408        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto parseFrom(
22409            java.io.InputStream input,
22410            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
22411            throws java.io.IOException {
22412          return PARSER.parseFrom(input, extensionRegistry);
22413        }
22414        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto parseDelimitedFrom(java.io.InputStream input)
22415            throws java.io.IOException {
22416          return PARSER.parseDelimitedFrom(input);
22417        }
22418        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto parseDelimitedFrom(
22419            java.io.InputStream input,
22420            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
22421            throws java.io.IOException {
22422          return PARSER.parseDelimitedFrom(input, extensionRegistry);
22423        }
22424        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto parseFrom(
22425            com.google.protobuf.CodedInputStream input)
22426            throws java.io.IOException {
22427          return PARSER.parseFrom(input);
22428        }
22429        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto parseFrom(
22430            com.google.protobuf.CodedInputStream input,
22431            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
22432            throws java.io.IOException {
22433          return PARSER.parseFrom(input, extensionRegistry);
22434        }
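    // Illustrative round trip (editorial sketch, not generated code): all of the
    // parseFrom overloads above delegate to PARSER, and serialization comes from
    // the inherited toByteArray()/writeTo() methods, so:
    //
    //   byte[] wire = request.toByteArray();
    //   PrepareRecoveryRequestProto parsed =
    //       PrepareRecoveryRequestProto.parseFrom(wire);
    //
    // parseFrom throws InvalidProtocolBufferException if the bytes are malformed
    // or a required field (reqInfo, segmentTxId) is missing.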
22435    
22436        public static Builder newBuilder() { return Builder.create(); }
22437        public Builder newBuilderForType() { return newBuilder(); }
22438        public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto prototype) {
22439          return newBuilder().mergeFrom(prototype);
22440        }
22441        public Builder toBuilder() { return newBuilder(this); }
22442    
22443        @java.lang.Override
22444        protected Builder newBuilderForType(
22445            com.google.protobuf.GeneratedMessage.BuilderParent parent) {
22446          Builder builder = new Builder(parent);
22447          return builder;
22448        }
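    // Example (editorial sketch, not generated code): building the prepareRecovery()
    // request. Both fields are required, so build() throws
    // UninitializedMessageException if either setter is skipped; 'requestInfo' is a
    // hypothetical RequestInfoProto assembled by the caller.
    //
    //   PrepareRecoveryRequestProto req = PrepareRecoveryRequestProto.newBuilder()
    //       .setReqInfo(requestInfo)
    //       .setSegmentTxId(1L)
    //       .build();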
22449        /**
22450         * Protobuf type {@code hadoop.hdfs.qjournal.PrepareRecoveryRequestProto}
22451         *
22452         * <pre>
22454         * prepareRecovery()
22455         * </pre>
22456         */
22457        public static final class Builder extends
22458            com.google.protobuf.GeneratedMessage.Builder<Builder>
22459           implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProtoOrBuilder {
22460          public static final com.google.protobuf.Descriptors.Descriptor
22461              getDescriptor() {
22462            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_PrepareRecoveryRequestProto_descriptor;
22463          }
22464    
22465          protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
22466              internalGetFieldAccessorTable() {
22467            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_PrepareRecoveryRequestProto_fieldAccessorTable
22468                .ensureFieldAccessorsInitialized(
22469                    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto.Builder.class);
22470          }
22471    
22472          // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto.newBuilder()
22473          private Builder() {
22474            maybeForceBuilderInitialization();
22475          }
22476    
22477          private Builder(
22478              com.google.protobuf.GeneratedMessage.BuilderParent parent) {
22479            super(parent);
22480            maybeForceBuilderInitialization();
22481          }
22482          private void maybeForceBuilderInitialization() {
22483            if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
22484              getReqInfoFieldBuilder();
22485            }
22486          }
22487          private static Builder create() {
22488            return new Builder();
22489          }
22490    
22491          public Builder clear() {
22492            super.clear();
22493            if (reqInfoBuilder_ == null) {
22494              reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
22495            } else {
22496              reqInfoBuilder_.clear();
22497            }
22498            bitField0_ = (bitField0_ & ~0x00000001);
22499            segmentTxId_ = 0L;
22500            bitField0_ = (bitField0_ & ~0x00000002);
22501            return this;
22502          }
22503    
22504          public Builder clone() {
22505            return create().mergeFrom(buildPartial());
22506          }
22507    
22508          public com.google.protobuf.Descriptors.Descriptor
22509              getDescriptorForType() {
22510            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_PrepareRecoveryRequestProto_descriptor;
22511          }
22512    
22513          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto getDefaultInstanceForType() {
22514            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto.getDefaultInstance();
22515          }
22516    
22517          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto build() {
22518            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto result = buildPartial();
22519            if (!result.isInitialized()) {
22520              throw newUninitializedMessageException(result);
22521            }
22522            return result;
22523          }
22524    
22525          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto buildPartial() {
22526            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto(this);
22527            int from_bitField0_ = bitField0_;
22528            int to_bitField0_ = 0;
22529            if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
22530              to_bitField0_ |= 0x00000001;
22531            }
22532            if (reqInfoBuilder_ == null) {
22533              result.reqInfo_ = reqInfo_;
22534            } else {
22535              result.reqInfo_ = reqInfoBuilder_.build();
22536            }
22537            if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
22538              to_bitField0_ |= 0x00000002;
22539            }
22540            result.segmentTxId_ = segmentTxId_;
22541            result.bitField0_ = to_bitField0_;
22542            onBuilt();
22543            return result;
22544          }
22545    
22546          public Builder mergeFrom(com.google.protobuf.Message other) {
22547            if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto) {
22548              return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto)other);
22549            } else {
22550              super.mergeFrom(other);
22551              return this;
22552            }
22553          }
22554    
22555          public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto other) {
22556            if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto.getDefaultInstance()) return this;
22557            if (other.hasReqInfo()) {
22558              mergeReqInfo(other.getReqInfo());
22559            }
22560            if (other.hasSegmentTxId()) {
22561              setSegmentTxId(other.getSegmentTxId());
22562            }
22563            this.mergeUnknownFields(other.getUnknownFields());
22564            return this;
22565          }
22566    
22567          public final boolean isInitialized() {
22568            if (!hasReqInfo()) {
22570              return false;
22571            }
22572            if (!hasSegmentTxId()) {
22574              return false;
22575            }
22576            if (!getReqInfo().isInitialized()) {
22578              return false;
22579            }
22580            return true;
22581          }
22582    
22583          public Builder mergeFrom(
22584              com.google.protobuf.CodedInputStream input,
22585              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
22586              throws java.io.IOException {
22587            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto parsedMessage = null;
22588            try {
22589              parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
22590            } catch (com.google.protobuf.InvalidProtocolBufferException e) {
22591              parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto) e.getUnfinishedMessage();
22592              throw e;
22593            } finally {
22594              if (parsedMessage != null) {
22595                mergeFrom(parsedMessage);
22596              }
22597            }
22598            return this;
22599          }
22600          private int bitField0_;
22601    
22602          // required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1;
22603          private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
22604          private com.google.protobuf.SingleFieldBuilder<
22605              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder> reqInfoBuilder_;
22606          /**
22607           * <code>required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1;</code>
22608           */
22609          public boolean hasReqInfo() {
22610            return ((bitField0_ & 0x00000001) == 0x00000001);
22611          }
22612          /**
22613           * <code>required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1;</code>
22614           */
22615          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo() {
22616            if (reqInfoBuilder_ == null) {
22617              return reqInfo_;
22618            } else {
22619              return reqInfoBuilder_.getMessage();
22620            }
22621          }
22622          /**
22623           * <code>required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1;</code>
22624           */
22625          public Builder setReqInfo(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto value) {
22626            if (reqInfoBuilder_ == null) {
22627              if (value == null) {
22628                throw new NullPointerException();
22629              }
22630              reqInfo_ = value;
22631              onChanged();
22632            } else {
22633              reqInfoBuilder_.setMessage(value);
22634            }
22635            bitField0_ |= 0x00000001;
22636            return this;
22637          }
22638          /**
22639           * <code>required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1;</code>
22640           */
22641          public Builder setReqInfo(
22642              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder builderForValue) {
22643            if (reqInfoBuilder_ == null) {
22644              reqInfo_ = builderForValue.build();
22645              onChanged();
22646            } else {
22647              reqInfoBuilder_.setMessage(builderForValue.build());
22648            }
22649            bitField0_ |= 0x00000001;
22650            return this;
22651          }
22652          /**
22653           * <code>required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1;</code>
22654           */
22655          public Builder mergeReqInfo(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto value) {
22656            if (reqInfoBuilder_ == null) {
22657              if (((bitField0_ & 0x00000001) == 0x00000001) &&
22658                  reqInfo_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance()) {
22659                reqInfo_ =
22660                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.newBuilder(reqInfo_).mergeFrom(value).buildPartial();
22661              } else {
22662                reqInfo_ = value;
22663              }
22664              onChanged();
22665            } else {
22666              reqInfoBuilder_.mergeFrom(value);
22667            }
22668            bitField0_ |= 0x00000001;
22669            return this;
22670          }
22671          /**
22672           * <code>required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1;</code>
22673           */
22674          public Builder clearReqInfo() {
22675            if (reqInfoBuilder_ == null) {
22676              reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
22677              onChanged();
22678            } else {
22679              reqInfoBuilder_.clear();
22680            }
22681            bitField0_ = (bitField0_ & ~0x00000001);
22682            return this;
22683          }
22684          /**
22685           * <code>required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1;</code>
22686           */
22687          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder getReqInfoBuilder() {
22688            bitField0_ |= 0x00000001;
22689            onChanged();
22690            return getReqInfoFieldBuilder().getBuilder();
22691          }
22692          /**
22693           * <code>required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1;</code>
22694           */
22695          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder() {
22696            if (reqInfoBuilder_ != null) {
22697              return reqInfoBuilder_.getMessageOrBuilder();
22698            } else {
22699              return reqInfo_;
22700            }
22701          }
22702          /**
22703           * <code>required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1;</code>
22704           */
22705          private com.google.protobuf.SingleFieldBuilder<
22706              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder> 
22707              getReqInfoFieldBuilder() {
22708            if (reqInfoBuilder_ == null) {
22709              reqInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder<
22710                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder>(
22711                      reqInfo_,
22712                      getParentForChildren(),
22713                      isClean());
22714              reqInfo_ = null;
22715            }
22716            return reqInfoBuilder_;
22717          }
22718    
22719          // required uint64 segmentTxId = 2;
22720          private long segmentTxId_ ;
22721          /**
22722           * <code>required uint64 segmentTxId = 2;</code>
22723           */
22724          public boolean hasSegmentTxId() {
22725            return ((bitField0_ & 0x00000002) == 0x00000002);
22726          }
22727          /**
22728           * <code>required uint64 segmentTxId = 2;</code>
22729           */
22730          public long getSegmentTxId() {
22731            return segmentTxId_;
22732          }
22733          /**
22734           * <code>required uint64 segmentTxId = 2;</code>
22735           */
22736          public Builder setSegmentTxId(long value) {
22737            bitField0_ |= 0x00000002;
22738            segmentTxId_ = value;
22739            onChanged();
22740            return this;
22741          }
22742          /**
22743           * <code>required uint64 segmentTxId = 2;</code>
22744           */
22745          public Builder clearSegmentTxId() {
22746            bitField0_ = (bitField0_ & ~0x00000002);
22747            segmentTxId_ = 0L;
22748            onChanged();
22749            return this;
22750          }
22751    
22752          // @@protoc_insertion_point(builder_scope:hadoop.hdfs.qjournal.PrepareRecoveryRequestProto)
22753        }
22754    
22755        static {
22756          defaultInstance = new PrepareRecoveryRequestProto(true);
22757          defaultInstance.initFields();
22758        }
22759    
22760        // @@protoc_insertion_point(class_scope:hadoop.hdfs.qjournal.PrepareRecoveryRequestProto)
22761      }
22762    
22763      public interface PrepareRecoveryResponseProtoOrBuilder
22764          extends com.google.protobuf.MessageOrBuilder {
22765    
22766        // optional .hadoop.hdfs.qjournal.SegmentStateProto segmentState = 1;
22767        /**
22768         * <code>optional .hadoop.hdfs.qjournal.SegmentStateProto segmentState = 1;</code>
22769         */
22770        boolean hasSegmentState();
22771        /**
22772         * <code>optional .hadoop.hdfs.qjournal.SegmentStateProto segmentState = 1;</code>
22773         */
22774        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto getSegmentState();
22775        /**
22776         * <code>optional .hadoop.hdfs.qjournal.SegmentStateProto segmentState = 1;</code>
22777         */
22778        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder getSegmentStateOrBuilder();
22779    
22780        // optional uint64 acceptedInEpoch = 2;
22781        /**
22782         * <code>optional uint64 acceptedInEpoch = 2;</code>
22783         */
22784        boolean hasAcceptedInEpoch();
22785        /**
22786         * <code>optional uint64 acceptedInEpoch = 2;</code>
22787         */
22788        long getAcceptedInEpoch();
22789    
22790        // required uint64 lastWriterEpoch = 3;
22791        /**
22792         * <code>required uint64 lastWriterEpoch = 3;</code>
22793         */
22794        boolean hasLastWriterEpoch();
22795        /**
22796         * <code>required uint64 lastWriterEpoch = 3;</code>
22797         */
22798        long getLastWriterEpoch();
22799    
22800        // optional uint64 lastCommittedTxId = 4;
22801        /**
22802         * <code>optional uint64 lastCommittedTxId = 4;</code>
22803         *
22804         * <pre>
22805         * The highest committed txid that this logger has ever seen.
22806         * This may be higher than the data it actually has, in the case
22807         * that it was lagging before the old writer crashed.
22808         * </pre>
22809         */
22810        boolean hasLastCommittedTxId();
22811        /**
22812         * <code>optional uint64 lastCommittedTxId = 4;</code>
22813         *
22814         * <pre>
22815         * The highest committed txid that this logger has ever seen.
22816         * This may be higher than the data it actually has, in the case
22817         * that it was lagging before the old writer crashed.
22818         * </pre>
22819         */
22820        long getLastCommittedTxId();
22821      }
22822      /**
22823       * Protobuf type {@code hadoop.hdfs.qjournal.PrepareRecoveryResponseProto}
22824       */
22825      public static final class PrepareRecoveryResponseProto extends
22826          com.google.protobuf.GeneratedMessage
22827          implements PrepareRecoveryResponseProtoOrBuilder {
22828        // Use PrepareRecoveryResponseProto.newBuilder() to construct.
22829        private PrepareRecoveryResponseProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
22830          super(builder);
22831          this.unknownFields = builder.getUnknownFields();
22832        }
22833        private PrepareRecoveryResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
22834    
22835        private static final PrepareRecoveryResponseProto defaultInstance;
22836        public static PrepareRecoveryResponseProto getDefaultInstance() {
22837          return defaultInstance;
22838        }
22839    
22840        public PrepareRecoveryResponseProto getDefaultInstanceForType() {
22841          return defaultInstance;
22842        }
22843    
22844        private final com.google.protobuf.UnknownFieldSet unknownFields;
22845        @java.lang.Override
22846        public final com.google.protobuf.UnknownFieldSet
22847            getUnknownFields() {
22848          return this.unknownFields;
22849        }
22850        private PrepareRecoveryResponseProto(
22851            com.google.protobuf.CodedInputStream input,
22852            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
22853            throws com.google.protobuf.InvalidProtocolBufferException {
22854          initFields();
22856          com.google.protobuf.UnknownFieldSet.Builder unknownFields =
22857              com.google.protobuf.UnknownFieldSet.newBuilder();
22858          try {
22859            boolean done = false;
22860            while (!done) {
22861              int tag = input.readTag();
22862              switch (tag) {
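              // Wire-format note (editorial, not generated): each tag is
              // (field_number << 3) | wire_type, so 10 is field 1 as a
              // length-delimited message (segmentState) and 16, 24, 32 are
              // fields 2-4 as varints (acceptedInEpoch, lastWriterEpoch,
              // lastCommittedTxId).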
22863                case 0:
22864                  done = true;
22865                  break;
22866                default: {
22867                  if (!parseUnknownField(input, unknownFields,
22868                                         extensionRegistry, tag)) {
22869                    done = true;
22870                  }
22871                  break;
22872                }
22873                case 10: {
22874                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder subBuilder = null;
22875                  if (((bitField0_ & 0x00000001) == 0x00000001)) {
22876                    subBuilder = segmentState_.toBuilder();
22877                  }
22878                  segmentState_ = input.readMessage(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.PARSER, extensionRegistry);
22879                  if (subBuilder != null) {
22880                    subBuilder.mergeFrom(segmentState_);
22881                    segmentState_ = subBuilder.buildPartial();
22882                  }
22883                  bitField0_ |= 0x00000001;
22884                  break;
22885                }
22886                case 16: {
22887                  bitField0_ |= 0x00000002;
22888                  acceptedInEpoch_ = input.readUInt64();
22889                  break;
22890                }
22891                case 24: {
22892                  bitField0_ |= 0x00000004;
22893                  lastWriterEpoch_ = input.readUInt64();
22894                  break;
22895                }
22896                case 32: {
22897                  bitField0_ |= 0x00000008;
22898                  lastCommittedTxId_ = input.readUInt64();
22899                  break;
22900                }
22901              }
22902            }
22903          } catch (com.google.protobuf.InvalidProtocolBufferException e) {
22904            throw e.setUnfinishedMessage(this);
22905          } catch (java.io.IOException e) {
22906            throw new com.google.protobuf.InvalidProtocolBufferException(
22907                e.getMessage()).setUnfinishedMessage(this);
22908          } finally {
22909            this.unknownFields = unknownFields.build();
22910            makeExtensionsImmutable();
22911          }
22912        }
22913        public static final com.google.protobuf.Descriptors.Descriptor
22914            getDescriptor() {
22915          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_PrepareRecoveryResponseProto_descriptor;
22916        }
22917    
22918        protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
22919            internalGetFieldAccessorTable() {
22920          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_PrepareRecoveryResponseProto_fieldAccessorTable
22921              .ensureFieldAccessorsInitialized(
22922                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto.Builder.class);
22923        }
22924    
22925        public static final com.google.protobuf.Parser<PrepareRecoveryResponseProto> PARSER =
22926            new com.google.protobuf.AbstractParser<PrepareRecoveryResponseProto>() {
22927          public PrepareRecoveryResponseProto parsePartialFrom(
22928              com.google.protobuf.CodedInputStream input,
22929              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
22930              throws com.google.protobuf.InvalidProtocolBufferException {
22931            return new PrepareRecoveryResponseProto(input, extensionRegistry);
22932          }
22933        };
22934    
22935        @java.lang.Override
22936        public com.google.protobuf.Parser<PrepareRecoveryResponseProto> getParserForType() {
22937          return PARSER;
22938        }
22939    
22940        private int bitField0_;
22941        // optional .hadoop.hdfs.qjournal.SegmentStateProto segmentState = 1;
22942        public static final int SEGMENTSTATE_FIELD_NUMBER = 1;
22943        private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto segmentState_;
22944        /**
22945         * <code>optional .hadoop.hdfs.qjournal.SegmentStateProto segmentState = 1;</code>
22946         */
22947        public boolean hasSegmentState() {
22948          return ((bitField0_ & 0x00000001) == 0x00000001);
22949        }
22950        /**
22951         * <code>optional .hadoop.hdfs.qjournal.SegmentStateProto segmentState = 1;</code>
22952         */
22953        public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto getSegmentState() {
22954          return segmentState_;
22955        }
22956        /**
22957         * <code>optional .hadoop.hdfs.qjournal.SegmentStateProto segmentState = 1;</code>
22958         */
22959        public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder getSegmentStateOrBuilder() {
22960          return segmentState_;
22961        }
22962    
22963        // optional uint64 acceptedInEpoch = 2;
22964        public static final int ACCEPTEDINEPOCH_FIELD_NUMBER = 2;
22965        private long acceptedInEpoch_;
22966        /**
22967         * <code>optional uint64 acceptedInEpoch = 2;</code>
22968         */
22969        public boolean hasAcceptedInEpoch() {
22970          return ((bitField0_ & 0x00000002) == 0x00000002);
22971        }
22972        /**
22973         * <code>optional uint64 acceptedInEpoch = 2;</code>
22974         */
22975        public long getAcceptedInEpoch() {
22976          return acceptedInEpoch_;
22977        }
22978    
22979        // required uint64 lastWriterEpoch = 3;
22980        public static final int LASTWRITEREPOCH_FIELD_NUMBER = 3;
22981        private long lastWriterEpoch_;
22982        /**
22983         * <code>required uint64 lastWriterEpoch = 3;</code>
22984         */
22985        public boolean hasLastWriterEpoch() {
22986          return ((bitField0_ & 0x00000004) == 0x00000004);
22987        }
22988        /**
22989         * <code>required uint64 lastWriterEpoch = 3;</code>
22990         */
22991        public long getLastWriterEpoch() {
22992          return lastWriterEpoch_;
22993        }
22994    
22995        // optional uint64 lastCommittedTxId = 4;
22996        public static final int LASTCOMMITTEDTXID_FIELD_NUMBER = 4;
22997        private long lastCommittedTxId_;
22998        /**
22999         * <code>optional uint64 lastCommittedTxId = 4;</code>
23000         *
23001         * <pre>
23002         * The highest committed txid that this logger has ever seen.
23003         * This may be higher than the data it actually has, in the case
23004         * that it was lagging before the old writer crashed.
23005         * </pre>
23006         */
23007        public boolean hasLastCommittedTxId() {
23008          return ((bitField0_ & 0x00000008) == 0x00000008);
23009        }
23010        /**
23011         * <code>optional uint64 lastCommittedTxId = 4;</code>
23012         *
23013         * <pre>
23014         * The highest committed txid that this logger has ever seen.
23015         * This may be higher than the data it actually has, in the case
23016         * that it was lagging before the old writer crashed.
23017         * </pre>
23018         */
23019        public long getLastCommittedTxId() {
23020          return lastCommittedTxId_;
23021        }
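    // Editorial note (not generated): lastCommittedTxId is optional, so callers are
    // expected to consult hasLastCommittedTxId() first; when the field was absent on
    // the wire the getter just returns the 0L default assigned in initFields().
    //
    //   long committed = resp.hasLastCommittedTxId() ? resp.getLastCommittedTxId() : -1L;
    //
    // ('resp' is a hypothetical PrepareRecoveryResponseProto instance.)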
23022    
23023        private void initFields() {
23024          segmentState_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance();
23025          acceptedInEpoch_ = 0L;
23026          lastWriterEpoch_ = 0L;
23027          lastCommittedTxId_ = 0L;
23028        }
23029        private byte memoizedIsInitialized = -1;
23030        public final boolean isInitialized() {
23031          byte isInitialized = memoizedIsInitialized;
23032          if (isInitialized != -1) return isInitialized == 1;
23033    
23034          if (!hasLastWriterEpoch()) {
23035            memoizedIsInitialized = 0;
23036            return false;
23037          }
23038          if (hasSegmentState()) {
23039            if (!getSegmentState().isInitialized()) {
23040              memoizedIsInitialized = 0;
23041              return false;
23042            }
23043          }
23044          memoizedIsInitialized = 1;
23045          return true;
23046        }
23047    
23048        public void writeTo(com.google.protobuf.CodedOutputStream output)
23049                            throws java.io.IOException {
23050          getSerializedSize();
23051          if (((bitField0_ & 0x00000001) == 0x00000001)) {
23052            output.writeMessage(1, segmentState_);
23053          }
23054          if (((bitField0_ & 0x00000002) == 0x00000002)) {
23055            output.writeUInt64(2, acceptedInEpoch_);
23056          }
23057          if (((bitField0_ & 0x00000004) == 0x00000004)) {
23058            output.writeUInt64(3, lastWriterEpoch_);
23059          }
23060          if (((bitField0_ & 0x00000008) == 0x00000008)) {
23061            output.writeUInt64(4, lastCommittedTxId_);
23062          }
23063          getUnknownFields().writeTo(output);
23064        }
23065    
23066        private int memoizedSerializedSize = -1;
23067        public int getSerializedSize() {
23068          int size = memoizedSerializedSize;
23069          if (size != -1) return size;
23070    
23071          size = 0;
23072          if (((bitField0_ & 0x00000001) == 0x00000001)) {
23073            size += com.google.protobuf.CodedOutputStream
23074              .computeMessageSize(1, segmentState_);
23075          }
23076          if (((bitField0_ & 0x00000002) == 0x00000002)) {
23077            size += com.google.protobuf.CodedOutputStream
23078              .computeUInt64Size(2, acceptedInEpoch_);
23079          }
23080          if (((bitField0_ & 0x00000004) == 0x00000004)) {
23081            size += com.google.protobuf.CodedOutputStream
23082              .computeUInt64Size(3, lastWriterEpoch_);
23083          }
23084          if (((bitField0_ & 0x00000008) == 0x00000008)) {
23085            size += com.google.protobuf.CodedOutputStream
23086              .computeUInt64Size(4, lastCommittedTxId_);
23087          }
23088          size += getUnknownFields().getSerializedSize();
23089          memoizedSerializedSize = size;
23090          return size;
23091        }
23092    
23093        private static final long serialVersionUID = 0L;
23094        @java.lang.Override
23095        protected java.lang.Object writeReplace()
23096            throws java.io.ObjectStreamException {
23097          return super.writeReplace();
23098        }
23099    
23100        @java.lang.Override
23101        public boolean equals(final java.lang.Object obj) {
23102          if (obj == this) {
23103            return true;
23104          }
23105          if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto)) {
23106            return super.equals(obj);
23107          }
23108          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto) obj;
23109    
23110          boolean result = true;
23111          result = result && (hasSegmentState() == other.hasSegmentState());
23112          if (hasSegmentState()) {
23113            result = result && getSegmentState()
23114                .equals(other.getSegmentState());
23115          }
23116          result = result && (hasAcceptedInEpoch() == other.hasAcceptedInEpoch());
23117          if (hasAcceptedInEpoch()) {
23118            result = result && (getAcceptedInEpoch()
23119                == other.getAcceptedInEpoch());
23120          }
23121          result = result && (hasLastWriterEpoch() == other.hasLastWriterEpoch());
23122          if (hasLastWriterEpoch()) {
23123            result = result && (getLastWriterEpoch()
23124                == other.getLastWriterEpoch());
23125          }
23126          result = result && (hasLastCommittedTxId() == other.hasLastCommittedTxId());
23127          if (hasLastCommittedTxId()) {
23128            result = result && (getLastCommittedTxId()
23129                == other.getLastCommittedTxId());
23130          }
23131          result = result &&
23132              getUnknownFields().equals(other.getUnknownFields());
23133          return result;
23134        }
23135    
23136        private int memoizedHashCode = 0;
23137        @java.lang.Override
23138        public int hashCode() {
23139          if (memoizedHashCode != 0) {
23140            return memoizedHashCode;
23141          }
23142          int hash = 41;
23143          hash = (19 * hash) + getDescriptorForType().hashCode();
23144          if (hasSegmentState()) {
23145            hash = (37 * hash) + SEGMENTSTATE_FIELD_NUMBER;
23146            hash = (53 * hash) + getSegmentState().hashCode();
23147          }
23148          if (hasAcceptedInEpoch()) {
23149            hash = (37 * hash) + ACCEPTEDINEPOCH_FIELD_NUMBER;
23150            hash = (53 * hash) + hashLong(getAcceptedInEpoch());
23151          }
23152          if (hasLastWriterEpoch()) {
23153            hash = (37 * hash) + LASTWRITEREPOCH_FIELD_NUMBER;
23154            hash = (53 * hash) + hashLong(getLastWriterEpoch());
23155          }
23156          if (hasLastCommittedTxId()) {
23157            hash = (37 * hash) + LASTCOMMITTEDTXID_FIELD_NUMBER;
23158            hash = (53 * hash) + hashLong(getLastCommittedTxId());
23159          }
23160          hash = (29 * hash) + getUnknownFields().hashCode();
23161          memoizedHashCode = hash;
23162          return hash;
23163        }
23164    
23165        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto parseFrom(
23166            com.google.protobuf.ByteString data)
23167            throws com.google.protobuf.InvalidProtocolBufferException {
23168          return PARSER.parseFrom(data);
23169        }
23170        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto parseFrom(
23171            com.google.protobuf.ByteString data,
23172            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
23173            throws com.google.protobuf.InvalidProtocolBufferException {
23174          return PARSER.parseFrom(data, extensionRegistry);
23175        }
23176        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto parseFrom(byte[] data)
23177            throws com.google.protobuf.InvalidProtocolBufferException {
23178          return PARSER.parseFrom(data);
23179        }
23180        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto parseFrom(
23181            byte[] data,
23182            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
23183            throws com.google.protobuf.InvalidProtocolBufferException {
23184          return PARSER.parseFrom(data, extensionRegistry);
23185        }
23186        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto parseFrom(java.io.InputStream input)
23187            throws java.io.IOException {
23188          return PARSER.parseFrom(input);
23189        }
23190        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto parseFrom(
23191            java.io.InputStream input,
23192            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
23193            throws java.io.IOException {
23194          return PARSER.parseFrom(input, extensionRegistry);
23195        }
23196        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto parseDelimitedFrom(java.io.InputStream input)
23197            throws java.io.IOException {
23198          return PARSER.parseDelimitedFrom(input);
23199        }
23200        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto parseDelimitedFrom(
23201            java.io.InputStream input,
23202            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
23203            throws java.io.IOException {
23204          return PARSER.parseDelimitedFrom(input, extensionRegistry);
23205        }
23206        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto parseFrom(
23207            com.google.protobuf.CodedInputStream input)
23208            throws java.io.IOException {
23209          return PARSER.parseFrom(input);
23210        }
23211        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto parseFrom(
23212            com.google.protobuf.CodedInputStream input,
23213            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
23214            throws java.io.IOException {
23215          return PARSER.parseFrom(input, extensionRegistry);
23216        }
23217    
23218        public static Builder newBuilder() { return Builder.create(); }
23219        public Builder newBuilderForType() { return newBuilder(); }
23220        public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto prototype) {
23221          return newBuilder().mergeFrom(prototype);
23222        }
23223        public Builder toBuilder() { return newBuilder(this); }
23224    
23225        @java.lang.Override
23226        protected Builder newBuilderForType(
23227            com.google.protobuf.GeneratedMessage.BuilderParent parent) {
23228          Builder builder = new Builder(parent);
23229          return builder;
23230        }
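    // Example (editorial sketch, not generated code): lastWriterEpoch is the only
    // required field on the response, so a logger with no segment to report could
    // reply with just
    //
    //   PrepareRecoveryResponseProto resp = PrepareRecoveryResponseProto.newBuilder()
    //       .setLastWriterEpoch(epoch)   // 'epoch' supplied by the journal node
    //       .build();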
23231        /**
23232         * Protobuf type {@code hadoop.hdfs.qjournal.PrepareRecoveryResponseProto}
23233         */
23234        public static final class Builder extends
23235            com.google.protobuf.GeneratedMessage.Builder<Builder>
23236           implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProtoOrBuilder {
23237          public static final com.google.protobuf.Descriptors.Descriptor
23238              getDescriptor() {
23239            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_PrepareRecoveryResponseProto_descriptor;
23240          }
23241    
23242          protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
23243              internalGetFieldAccessorTable() {
23244            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_PrepareRecoveryResponseProto_fieldAccessorTable
23245                .ensureFieldAccessorsInitialized(
23246                    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto.Builder.class);
23247          }
23248    
23249          // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto.newBuilder()
23250          private Builder() {
23251            maybeForceBuilderInitialization();
23252          }
23253    
23254          private Builder(
23255              com.google.protobuf.GeneratedMessage.BuilderParent parent) {
23256            super(parent);
23257            maybeForceBuilderInitialization();
23258          }
23259          private void maybeForceBuilderInitialization() {
23260            if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
23261              getSegmentStateFieldBuilder();
23262            }
23263          }
23264          private static Builder create() {
23265            return new Builder();
23266          }
23267    
23268          public Builder clear() {
23269            super.clear();
23270            if (segmentStateBuilder_ == null) {
23271              segmentState_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance();
23272            } else {
23273              segmentStateBuilder_.clear();
23274            }
23275            bitField0_ = (bitField0_ & ~0x00000001);
23276            acceptedInEpoch_ = 0L;
23277            bitField0_ = (bitField0_ & ~0x00000002);
23278            lastWriterEpoch_ = 0L;
23279            bitField0_ = (bitField0_ & ~0x00000004);
23280            lastCommittedTxId_ = 0L;
23281            bitField0_ = (bitField0_ & ~0x00000008);
23282            return this;
23283          }
23284    
23285          public Builder clone() {
23286            return create().mergeFrom(buildPartial());
23287          }
23288    
23289          public com.google.protobuf.Descriptors.Descriptor
23290              getDescriptorForType() {
23291            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_PrepareRecoveryResponseProto_descriptor;
23292          }
23293    
23294          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto getDefaultInstanceForType() {
23295            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto.getDefaultInstance();
23296          }
23297    
23298          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto build() {
23299            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto result = buildPartial();
23300            if (!result.isInitialized()) {
23301              throw newUninitializedMessageException(result);
23302            }
23303            return result;
23304          }
23305    
23306          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto buildPartial() {
23307            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto(this);
23308            int from_bitField0_ = bitField0_;
23309            int to_bitField0_ = 0;
23310            if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
23311              to_bitField0_ |= 0x00000001;
23312            }
23313            if (segmentStateBuilder_ == null) {
23314              result.segmentState_ = segmentState_;
23315            } else {
23316              result.segmentState_ = segmentStateBuilder_.build();
23317            }
23318            if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
23319              to_bitField0_ |= 0x00000002;
23320            }
23321            result.acceptedInEpoch_ = acceptedInEpoch_;
23322            if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
23323              to_bitField0_ |= 0x00000004;
23324            }
23325            result.lastWriterEpoch_ = lastWriterEpoch_;
23326            if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
23327              to_bitField0_ |= 0x00000008;
23328            }
23329            result.lastCommittedTxId_ = lastCommittedTxId_;
23330            result.bitField0_ = to_bitField0_;
23331            onBuilt();
23332            return result;
23333          }
23334    
23335          public Builder mergeFrom(com.google.protobuf.Message other) {
23336            if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto) {
23337              return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto)other);
23338            } else {
23339              super.mergeFrom(other);
23340              return this;
23341            }
23342          }
23343    
23344          public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto other) {
23345            if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto.getDefaultInstance()) return this;
23346            if (other.hasSegmentState()) {
23347              mergeSegmentState(other.getSegmentState());
23348            }
23349            if (other.hasAcceptedInEpoch()) {
23350              setAcceptedInEpoch(other.getAcceptedInEpoch());
23351            }
23352            if (other.hasLastWriterEpoch()) {
23353              setLastWriterEpoch(other.getLastWriterEpoch());
23354            }
23355            if (other.hasLastCommittedTxId()) {
23356              setLastCommittedTxId(other.getLastCommittedTxId());
23357            }
23358            this.mergeUnknownFields(other.getUnknownFields());
23359            return this;
23360          }
23361    
23362          public final boolean isInitialized() {
23363            if (!hasLastWriterEpoch()) {
23365              return false;
23366            }
23367            if (hasSegmentState()) {
23368              if (!getSegmentState().isInitialized()) {
23370                return false;
23371              }
23372            }
23373            return true;
23374          }
23375    
23376          public Builder mergeFrom(
23377              com.google.protobuf.CodedInputStream input,
23378              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
23379              throws java.io.IOException {
23380            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto parsedMessage = null;
23381            try {
23382              parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
23383            } catch (com.google.protobuf.InvalidProtocolBufferException e) {
23384              parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto) e.getUnfinishedMessage();
23385              throw e;
23386            } finally {
23387              if (parsedMessage != null) {
23388                mergeFrom(parsedMessage);
23389              }
23390            }
23391            return this;
23392          }
23393          private int bitField0_;
23394    
23395          // optional .hadoop.hdfs.qjournal.SegmentStateProto segmentState = 1;
23396          private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto segmentState_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance();
23397          private com.google.protobuf.SingleFieldBuilder<
23398              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder> segmentStateBuilder_;
23399          /**
23400           * <code>optional .hadoop.hdfs.qjournal.SegmentStateProto segmentState = 1;</code>
23401           */
23402          public boolean hasSegmentState() {
23403            return ((bitField0_ & 0x00000001) == 0x00000001);
23404          }
23405          /**
23406           * <code>optional .hadoop.hdfs.qjournal.SegmentStateProto segmentState = 1;</code>
23407           */
23408          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto getSegmentState() {
23409            if (segmentStateBuilder_ == null) {
23410              return segmentState_;
23411            } else {
23412              return segmentStateBuilder_.getMessage();
23413            }
23414          }
23415          /**
23416           * <code>optional .hadoop.hdfs.qjournal.SegmentStateProto segmentState = 1;</code>
23417           */
23418          public Builder setSegmentState(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto value) {
23419            if (segmentStateBuilder_ == null) {
23420              if (value == null) {
23421                throw new NullPointerException();
23422              }
23423              segmentState_ = value;
23424              onChanged();
23425            } else {
23426              segmentStateBuilder_.setMessage(value);
23427            }
23428            bitField0_ |= 0x00000001;
23429            return this;
23430          }
23431          /**
23432           * <code>optional .hadoop.hdfs.qjournal.SegmentStateProto segmentState = 1;</code>
23433           */
23434          public Builder setSegmentState(
23435              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder builderForValue) {
23436            if (segmentStateBuilder_ == null) {
23437              segmentState_ = builderForValue.build();
23438              onChanged();
23439            } else {
23440              segmentStateBuilder_.setMessage(builderForValue.build());
23441            }
23442            bitField0_ |= 0x00000001;
23443            return this;
23444          }
23445          /**
23446           * <code>optional .hadoop.hdfs.qjournal.SegmentStateProto segmentState = 1;</code>
23447           */
23448          public Builder mergeSegmentState(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto value) {
23449            if (segmentStateBuilder_ == null) {
23450              if (((bitField0_ & 0x00000001) == 0x00000001) &&
23451                  segmentState_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance()) {
23452                segmentState_ =
23453                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.newBuilder(segmentState_).mergeFrom(value).buildPartial();
23454              } else {
23455                segmentState_ = value;
23456              }
23457              onChanged();
23458            } else {
23459              segmentStateBuilder_.mergeFrom(value);
23460            }
23461            bitField0_ |= 0x00000001;
23462            return this;
23463          }
23464          /**
23465           * <code>optional .hadoop.hdfs.qjournal.SegmentStateProto segmentState = 1;</code>
23466           */
23467          public Builder clearSegmentState() {
23468            if (segmentStateBuilder_ == null) {
23469              segmentState_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance();
23470              onChanged();
23471            } else {
23472              segmentStateBuilder_.clear();
23473            }
23474            bitField0_ = (bitField0_ & ~0x00000001);
23475            return this;
23476          }
23477          /**
23478           * <code>optional .hadoop.hdfs.qjournal.SegmentStateProto segmentState = 1;</code>
23479           */
23480          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder getSegmentStateBuilder() {
23481            bitField0_ |= 0x00000001;
23482            onChanged();
23483            return getSegmentStateFieldBuilder().getBuilder();
23484          }
23485          /**
23486           * <code>optional .hadoop.hdfs.qjournal.SegmentStateProto segmentState = 1;</code>
23487           */
23488          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder getSegmentStateOrBuilder() {
23489            if (segmentStateBuilder_ != null) {
23490              return segmentStateBuilder_.getMessageOrBuilder();
23491            } else {
23492              return segmentState_;
23493            }
23494          }
23495          /**
23496           * <code>optional .hadoop.hdfs.qjournal.SegmentStateProto segmentState = 1;</code>
23497           */
23498          private com.google.protobuf.SingleFieldBuilder<
23499              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder> 
23500              getSegmentStateFieldBuilder() {
23501            if (segmentStateBuilder_ == null) {
23502              segmentStateBuilder_ = new com.google.protobuf.SingleFieldBuilder<
23503                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder>(
23504                      segmentState_,
23505                      getParentForChildren(),
23506                      isClean());
23507              segmentState_ = null;
23508            }
23509            return segmentStateBuilder_;
23510          }
23511    
23512          // optional uint64 acceptedInEpoch = 2;
23513          private long acceptedInEpoch_ ;
23514          /**
23515           * <code>optional uint64 acceptedInEpoch = 2;</code>
23516           */
23517          public boolean hasAcceptedInEpoch() {
23518            return ((bitField0_ & 0x00000002) == 0x00000002);
23519          }
23520          /**
23521           * <code>optional uint64 acceptedInEpoch = 2;</code>
23522           */
23523          public long getAcceptedInEpoch() {
23524            return acceptedInEpoch_;
23525          }
23526          /**
23527           * <code>optional uint64 acceptedInEpoch = 2;</code>
23528           */
23529          public Builder setAcceptedInEpoch(long value) {
23530            bitField0_ |= 0x00000002;
23531            acceptedInEpoch_ = value;
23532            onChanged();
23533            return this;
23534          }
23535          /**
23536           * <code>optional uint64 acceptedInEpoch = 2;</code>
23537           */
23538          public Builder clearAcceptedInEpoch() {
23539            bitField0_ = (bitField0_ & ~0x00000002);
23540            acceptedInEpoch_ = 0L;
23541            onChanged();
23542            return this;
23543          }
23544    
23545          // required uint64 lastWriterEpoch = 3;
23546          private long lastWriterEpoch_ ;
23547          /**
23548           * <code>required uint64 lastWriterEpoch = 3;</code>
23549           */
23550          public boolean hasLastWriterEpoch() {
23551            return ((bitField0_ & 0x00000004) == 0x00000004);
23552          }
23553          /**
23554           * <code>required uint64 lastWriterEpoch = 3;</code>
23555           */
23556          public long getLastWriterEpoch() {
23557            return lastWriterEpoch_;
23558          }
23559          /**
23560           * <code>required uint64 lastWriterEpoch = 3;</code>
23561           */
23562          public Builder setLastWriterEpoch(long value) {
23563            bitField0_ |= 0x00000004;
23564            lastWriterEpoch_ = value;
23565            onChanged();
23566            return this;
23567          }
23568          /**
23569           * <code>required uint64 lastWriterEpoch = 3;</code>
23570           */
23571          public Builder clearLastWriterEpoch() {
23572            bitField0_ = (bitField0_ & ~0x00000004);
23573            lastWriterEpoch_ = 0L;
23574            onChanged();
23575            return this;
23576          }
23577    
23578          // optional uint64 lastCommittedTxId = 4;
23579          private long lastCommittedTxId_ ;
23580          /**
23581           * <code>optional uint64 lastCommittedTxId = 4;</code>
23582           *
23583           * <pre>
23584           * The highest committed txid that this logger has ever seen.
23585           * This may be higher than the data it actually has, in the case
23586           * that it was lagging before the old writer crashed.
23587           * </pre>
23588           */
23589          public boolean hasLastCommittedTxId() {
23590            return ((bitField0_ & 0x00000008) == 0x00000008);
23591          }
23592          /**
23593           * <code>optional uint64 lastCommittedTxId = 4;</code>
23594           *
23595           * <pre>
23596           * The highest committed txid that this logger has ever seen.
23597           * This may be higher than the data it actually has, in the case
23598           * that it was lagging before the old writer crashed.
23599           * </pre>
23600           */
23601          public long getLastCommittedTxId() {
23602            return lastCommittedTxId_;
23603          }
23604          /**
23605           * <code>optional uint64 lastCommittedTxId = 4;</code>
23606           *
23607           * <pre>
23608           * The highest committed txid that this logger has ever seen.
23609           * This may be higher than the data it actually has, in the case
23610           * that it was lagging before the old writer crashed.
23611           * </pre>
23612           */
23613          public Builder setLastCommittedTxId(long value) {
23614            bitField0_ |= 0x00000008;
23615            lastCommittedTxId_ = value;
23616            onChanged();
23617            return this;
23618          }
23619          /**
23620           * <code>optional uint64 lastCommittedTxId = 4;</code>
23621           *
23622           * <pre>
23623           * The highest committed txid that this logger has ever seen.
23624           * This may be higher than the data it actually has, in the case
23625           * that it was lagging before the old writer crashed.
23626           * </pre>
23627           */
23628          public Builder clearLastCommittedTxId() {
23629            bitField0_ = (bitField0_ & ~0x00000008);
23630            lastCommittedTxId_ = 0L;
23631            onChanged();
23632            return this;
23633          }
23634    
23635          // @@protoc_insertion_point(builder_scope:hadoop.hdfs.qjournal.PrepareRecoveryResponseProto)
23636        }
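
    // Hand-written illustration, not protoc output: the generated merge*
    // methods follow protobuf merge semantics. If the target builder already
    // holds a non-default segmentState, fields set on the update overwrite the
    // corresponding fields while unset fields are retained; otherwise the
    // update simply replaces the value. "target" and "update" are hypothetical
    // names.
    private static Builder exampleMergeSegmentState(
        Builder target, SegmentStateProto update) {
      return target.mergeSegmentState(update);
    }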
23637    
23638        static {
23639          defaultInstance = new PrepareRecoveryResponseProto(true);
23640          defaultInstance.initFields();
23641        }
23642    
23643        // @@protoc_insertion_point(class_scope:hadoop.hdfs.qjournal.PrepareRecoveryResponseProto)
23644      }
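
  // Hand-written illustration, not protoc output: assembling a
  // PrepareRecoveryResponseProto with the generated builder. The epoch and
  // segment values are hypothetical placeholders; only lastWriterEpoch is
  // required, so build() succeeds once it is set.
  private static PrepareRecoveryResponseProto examplePrepareRecoveryResponse(
      SegmentStateProto segmentState) {
    PrepareRecoveryResponseProto.Builder builder = PrepareRecoveryResponseProto
        .newBuilder()
        .setLastWriterEpoch(1L);              // required uint64 lastWriterEpoch = 3
    if (segmentState != null) {
      builder.setSegmentState(segmentState);  // optional message field 1
      builder.setAcceptedInEpoch(2L);         // optional uint64 acceptedInEpoch = 2
    }
    return builder.build();                   // throws if a required field is unset
  }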
23645    
23646      public interface AcceptRecoveryRequestProtoOrBuilder
23647          extends com.google.protobuf.MessageOrBuilder {
23648    
23649        // required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1;
23650        /**
23651         * <code>required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1;</code>
23652         */
23653        boolean hasReqInfo();
23654        /**
23655         * <code>required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1;</code>
23656         */
23657        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo();
23658        /**
23659         * <code>required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1;</code>
23660         */
23661        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder();
23662    
23663        // required .hadoop.hdfs.qjournal.SegmentStateProto stateToAccept = 2;
23664        /**
23665         * <code>required .hadoop.hdfs.qjournal.SegmentStateProto stateToAccept = 2;</code>
23666         *
23667         * <pre>
23668         ** Details on the segment to recover 
23669         * </pre>
23670         */
23671        boolean hasStateToAccept();
23672        /**
23673         * <code>required .hadoop.hdfs.qjournal.SegmentStateProto stateToAccept = 2;</code>
23674         *
23675         * <pre>
23676         ** Details on the segment to recover 
23677         * </pre>
23678         */
23679        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto getStateToAccept();
23680        /**
23681         * <code>required .hadoop.hdfs.qjournal.SegmentStateProto stateToAccept = 2;</code>
23682         *
23683         * <pre>
23684         ** Details on the segment to recover 
23685         * </pre>
23686         */
23687        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder getStateToAcceptOrBuilder();
23688    
23689        // required string fromURL = 3;
23690        /**
23691         * <code>required string fromURL = 3;</code>
23692         *
23693         * <pre>
23694         ** The URL from which the log may be copied 
23695         * </pre>
23696         */
23697        boolean hasFromURL();
23698        /**
23699         * <code>required string fromURL = 3;</code>
23700         *
23701         * <pre>
23702         ** The URL from which the log may be copied 
23703         * </pre>
23704         */
23705        java.lang.String getFromURL();
23706        /**
23707         * <code>required string fromURL = 3;</code>
23708         *
23709         * <pre>
23710         ** The URL from which the log may be copied 
23711         * </pre>
23712         */
23713        com.google.protobuf.ByteString
23714            getFromURLBytes();
23715      }
23716      /**
23717       * Protobuf type {@code hadoop.hdfs.qjournal.AcceptRecoveryRequestProto}
23718       *
23719       * <pre>
23720       **
23721       * acceptRecovery()
23722       * </pre>
23723       */
23724      public static final class AcceptRecoveryRequestProto extends
23725          com.google.protobuf.GeneratedMessage
23726          implements AcceptRecoveryRequestProtoOrBuilder {
23727        // Use AcceptRecoveryRequestProto.newBuilder() to construct.
23728        private AcceptRecoveryRequestProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
23729          super(builder);
23730          this.unknownFields = builder.getUnknownFields();
23731        }
23732        private AcceptRecoveryRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
23733    
23734        private static final AcceptRecoveryRequestProto defaultInstance;
23735        public static AcceptRecoveryRequestProto getDefaultInstance() {
23736          return defaultInstance;
23737        }
23738    
23739        public AcceptRecoveryRequestProto getDefaultInstanceForType() {
23740          return defaultInstance;
23741        }
23742    
23743        private final com.google.protobuf.UnknownFieldSet unknownFields;
23744        @java.lang.Override
23745        public final com.google.protobuf.UnknownFieldSet
23746            getUnknownFields() {
23747          return this.unknownFields;
23748        }
23749        private AcceptRecoveryRequestProto(
23750            com.google.protobuf.CodedInputStream input,
23751            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
23752            throws com.google.protobuf.InvalidProtocolBufferException {
23753          initFields();
23754          int mutable_bitField0_ = 0;
23755          com.google.protobuf.UnknownFieldSet.Builder unknownFields =
23756              com.google.protobuf.UnknownFieldSet.newBuilder();
23757          try {
23758            boolean done = false;
23759            while (!done) {
23760              int tag = input.readTag();
23761              switch (tag) {
23762                case 0:
23763                  done = true;
23764                  break;
23765                default: {
23766                  if (!parseUnknownField(input, unknownFields,
23767                                         extensionRegistry, tag)) {
23768                    done = true;
23769                  }
23770                  break;
23771                }
23772                case 10: {
23773                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder subBuilder = null;
23774                  if (((bitField0_ & 0x00000001) == 0x00000001)) {
23775                    subBuilder = reqInfo_.toBuilder();
23776                  }
23777                  reqInfo_ = input.readMessage(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.PARSER, extensionRegistry);
23778                  if (subBuilder != null) {
23779                    subBuilder.mergeFrom(reqInfo_);
23780                    reqInfo_ = subBuilder.buildPartial();
23781                  }
23782                  bitField0_ |= 0x00000001;
23783                  break;
23784                }
23785                case 18: {
23786                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder subBuilder = null;
23787                  if (((bitField0_ & 0x00000002) == 0x00000002)) {
23788                    subBuilder = stateToAccept_.toBuilder();
23789                  }
23790                  stateToAccept_ = input.readMessage(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.PARSER, extensionRegistry);
23791                  if (subBuilder != null) {
23792                    subBuilder.mergeFrom(stateToAccept_);
23793                    stateToAccept_ = subBuilder.buildPartial();
23794                  }
23795                  bitField0_ |= 0x00000002;
23796                  break;
23797                }
23798                case 26: {
23799                  bitField0_ |= 0x00000004;
23800                  fromURL_ = input.readBytes();
23801                  break;
23802                }
23803              }
23804            }
23805          } catch (com.google.protobuf.InvalidProtocolBufferException e) {
23806            throw e.setUnfinishedMessage(this);
23807          } catch (java.io.IOException e) {
23808            throw new com.google.protobuf.InvalidProtocolBufferException(
23809                e.getMessage()).setUnfinishedMessage(this);
23810          } finally {
23811            this.unknownFields = unknownFields.build();
23812            makeExtensionsImmutable();
23813          }
23814        }
23815        public static final com.google.protobuf.Descriptors.Descriptor
23816            getDescriptor() {
23817          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_AcceptRecoveryRequestProto_descriptor;
23818        }
23819    
23820        protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
23821            internalGetFieldAccessorTable() {
23822          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_AcceptRecoveryRequestProto_fieldAccessorTable
23823              .ensureFieldAccessorsInitialized(
23824                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto.Builder.class);
23825        }
23826    
23827        public static com.google.protobuf.Parser<AcceptRecoveryRequestProto> PARSER =
23828            new com.google.protobuf.AbstractParser<AcceptRecoveryRequestProto>() {
23829          public AcceptRecoveryRequestProto parsePartialFrom(
23830              com.google.protobuf.CodedInputStream input,
23831              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
23832              throws com.google.protobuf.InvalidProtocolBufferException {
23833            return new AcceptRecoveryRequestProto(input, extensionRegistry);
23834          }
23835        };
23836    
23837        @java.lang.Override
23838        public com.google.protobuf.Parser<AcceptRecoveryRequestProto> getParserForType() {
23839          return PARSER;
23840        }
23841    
23842        private int bitField0_;
23843        // required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1;
23844        public static final int REQINFO_FIELD_NUMBER = 1;
23845        private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto reqInfo_;
23846        /**
23847         * <code>required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1;</code>
23848         */
23849        public boolean hasReqInfo() {
23850          return ((bitField0_ & 0x00000001) == 0x00000001);
23851        }
23852        /**
23853         * <code>required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1;</code>
23854         */
23855        public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo() {
23856          return reqInfo_;
23857        }
23858        /**
23859         * <code>required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1;</code>
23860         */
23861        public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder() {
23862          return reqInfo_;
23863        }
23864    
23865        // required .hadoop.hdfs.qjournal.SegmentStateProto stateToAccept = 2;
23866        public static final int STATETOACCEPT_FIELD_NUMBER = 2;
23867        private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto stateToAccept_;
23868        /**
23869         * <code>required .hadoop.hdfs.qjournal.SegmentStateProto stateToAccept = 2;</code>
23870         *
23871         * <pre>
23872         ** Details on the segment to recover 
23873         * </pre>
23874         */
23875        public boolean hasStateToAccept() {
23876          return ((bitField0_ & 0x00000002) == 0x00000002);
23877        }
23878        /**
23879         * <code>required .hadoop.hdfs.qjournal.SegmentStateProto stateToAccept = 2;</code>
23880         *
23881         * <pre>
23882         ** Details on the segment to recover 
23883         * </pre>
23884         */
23885        public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto getStateToAccept() {
23886          return stateToAccept_;
23887        }
23888        /**
23889         * <code>required .hadoop.hdfs.qjournal.SegmentStateProto stateToAccept = 2;</code>
23890         *
23891         * <pre>
23892         ** Details on the segment to recover 
23893         * </pre>
23894         */
23895        public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder getStateToAcceptOrBuilder() {
23896          return stateToAccept_;
23897        }
23898    
23899        // required string fromURL = 3;
23900        public static final int FROMURL_FIELD_NUMBER = 3;
23901        private java.lang.Object fromURL_;
23902        /**
23903         * <code>required string fromURL = 3;</code>
23904         *
23905         * <pre>
23906         ** The URL from which the log may be copied 
23907         * </pre>
23908         */
23909        public boolean hasFromURL() {
23910          return ((bitField0_ & 0x00000004) == 0x00000004);
23911        }
23912        /**
23913         * <code>required string fromURL = 3;</code>
23914         *
23915         * <pre>
23916         ** The URL from which the log may be copied 
23917         * </pre>
23918         */
23919        public java.lang.String getFromURL() {
23920          java.lang.Object ref = fromURL_;
23921          if (ref instanceof java.lang.String) {
23922            return (java.lang.String) ref;
23923          } else {
23924            com.google.protobuf.ByteString bs = 
23925                (com.google.protobuf.ByteString) ref;
23926            java.lang.String s = bs.toStringUtf8();
23927            if (bs.isValidUtf8()) {
23928              fromURL_ = s;
23929            }
23930            return s;
23931          }
23932        }
23933        /**
23934         * <code>required string fromURL = 3;</code>
23935         *
23936         * <pre>
23937         ** The URL from which the log may be copied 
23938         * </pre>
23939         */
23940        public com.google.protobuf.ByteString
23941            getFromURLBytes() {
23942          java.lang.Object ref = fromURL_;
23943          if (ref instanceof java.lang.String) {
23944            com.google.protobuf.ByteString b = 
23945                com.google.protobuf.ByteString.copyFromUtf8(
23946                    (java.lang.String) ref);
23947            fromURL_ = b;
23948            return b;
23949          } else {
23950            return (com.google.protobuf.ByteString) ref;
23951          }
23952        }
23953    
23954        private void initFields() {
23955          reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
23956          stateToAccept_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance();
23957          fromURL_ = "";
23958        }
23959        private byte memoizedIsInitialized = -1;
23960        public final boolean isInitialized() {
23961          byte isInitialized = memoizedIsInitialized;
23962          if (isInitialized != -1) return isInitialized == 1;
23963    
23964          if (!hasReqInfo()) {
23965            memoizedIsInitialized = 0;
23966            return false;
23967          }
23968          if (!hasStateToAccept()) {
23969            memoizedIsInitialized = 0;
23970            return false;
23971          }
23972          if (!hasFromURL()) {
23973            memoizedIsInitialized = 0;
23974            return false;
23975          }
23976          if (!getReqInfo().isInitialized()) {
23977            memoizedIsInitialized = 0;
23978            return false;
23979          }
23980          if (!getStateToAccept().isInitialized()) {
23981            memoizedIsInitialized = 0;
23982            return false;
23983          }
23984          memoizedIsInitialized = 1;
23985          return true;
23986        }
23987    
23988        public void writeTo(com.google.protobuf.CodedOutputStream output)
23989                            throws java.io.IOException {
23990          getSerializedSize();
23991          if (((bitField0_ & 0x00000001) == 0x00000001)) {
23992            output.writeMessage(1, reqInfo_);
23993          }
23994          if (((bitField0_ & 0x00000002) == 0x00000002)) {
23995            output.writeMessage(2, stateToAccept_);
23996          }
23997          if (((bitField0_ & 0x00000004) == 0x00000004)) {
23998            output.writeBytes(3, getFromURLBytes());
23999          }
24000          getUnknownFields().writeTo(output);
24001        }
24002    
24003        private int memoizedSerializedSize = -1;
24004        public int getSerializedSize() {
24005          int size = memoizedSerializedSize;
24006          if (size != -1) return size;
24007    
24008          size = 0;
24009          if (((bitField0_ & 0x00000001) == 0x00000001)) {
24010            size += com.google.protobuf.CodedOutputStream
24011              .computeMessageSize(1, reqInfo_);
24012          }
24013          if (((bitField0_ & 0x00000002) == 0x00000002)) {
24014            size += com.google.protobuf.CodedOutputStream
24015              .computeMessageSize(2, stateToAccept_);
24016          }
24017          if (((bitField0_ & 0x00000004) == 0x00000004)) {
24018            size += com.google.protobuf.CodedOutputStream
24019              .computeBytesSize(3, getFromURLBytes());
24020          }
24021          size += getUnknownFields().getSerializedSize();
24022          memoizedSerializedSize = size;
24023          return size;
24024        }
24025    
24026        private static final long serialVersionUID = 0L;
24027        @java.lang.Override
24028        protected java.lang.Object writeReplace()
24029            throws java.io.ObjectStreamException {
24030          return super.writeReplace();
24031        }
24032    
24033        @java.lang.Override
24034        public boolean equals(final java.lang.Object obj) {
24035          if (obj == this) {
24036            return true;
24037          }
24038          if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto)) {
24039            return super.equals(obj);
24040          }
24041          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto) obj;
24042    
24043          boolean result = true;
24044          result = result && (hasReqInfo() == other.hasReqInfo());
24045          if (hasReqInfo()) {
24046            result = result && getReqInfo()
24047                .equals(other.getReqInfo());
24048          }
24049          result = result && (hasStateToAccept() == other.hasStateToAccept());
24050          if (hasStateToAccept()) {
24051            result = result && getStateToAccept()
24052                .equals(other.getStateToAccept());
24053          }
24054          result = result && (hasFromURL() == other.hasFromURL());
24055          if (hasFromURL()) {
24056            result = result && getFromURL()
24057                .equals(other.getFromURL());
24058          }
24059          result = result &&
24060              getUnknownFields().equals(other.getUnknownFields());
24061          return result;
24062        }
24063    
24064        private int memoizedHashCode = 0;
24065        @java.lang.Override
24066        public int hashCode() {
24067          if (memoizedHashCode != 0) {
24068            return memoizedHashCode;
24069          }
24070          int hash = 41;
24071          hash = (19 * hash) + getDescriptorForType().hashCode();
24072          if (hasReqInfo()) {
24073            hash = (37 * hash) + REQINFO_FIELD_NUMBER;
24074            hash = (53 * hash) + getReqInfo().hashCode();
24075          }
24076          if (hasStateToAccept()) {
24077            hash = (37 * hash) + STATETOACCEPT_FIELD_NUMBER;
24078            hash = (53 * hash) + getStateToAccept().hashCode();
24079          }
24080          if (hasFromURL()) {
24081            hash = (37 * hash) + FROMURL_FIELD_NUMBER;
24082            hash = (53 * hash) + getFromURL().hashCode();
24083          }
24084          hash = (29 * hash) + getUnknownFields().hashCode();
24085          memoizedHashCode = hash;
24086          return hash;
24087        }
24088    
24089        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto parseFrom(
24090            com.google.protobuf.ByteString data)
24091            throws com.google.protobuf.InvalidProtocolBufferException {
24092          return PARSER.parseFrom(data);
24093        }
24094        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto parseFrom(
24095            com.google.protobuf.ByteString data,
24096            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
24097            throws com.google.protobuf.InvalidProtocolBufferException {
24098          return PARSER.parseFrom(data, extensionRegistry);
24099        }
24100        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto parseFrom(byte[] data)
24101            throws com.google.protobuf.InvalidProtocolBufferException {
24102          return PARSER.parseFrom(data);
24103        }
24104        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto parseFrom(
24105            byte[] data,
24106            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
24107            throws com.google.protobuf.InvalidProtocolBufferException {
24108          return PARSER.parseFrom(data, extensionRegistry);
24109        }
24110        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto parseFrom(java.io.InputStream input)
24111            throws java.io.IOException {
24112          return PARSER.parseFrom(input);
24113        }
24114        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto parseFrom(
24115            java.io.InputStream input,
24116            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
24117            throws java.io.IOException {
24118          return PARSER.parseFrom(input, extensionRegistry);
24119        }
24120        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto parseDelimitedFrom(java.io.InputStream input)
24121            throws java.io.IOException {
24122          return PARSER.parseDelimitedFrom(input);
24123        }
24124        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto parseDelimitedFrom(
24125            java.io.InputStream input,
24126            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
24127            throws java.io.IOException {
24128          return PARSER.parseDelimitedFrom(input, extensionRegistry);
24129        }
24130        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto parseFrom(
24131            com.google.protobuf.CodedInputStream input)
24132            throws java.io.IOException {
24133          return PARSER.parseFrom(input);
24134        }
24135        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto parseFrom(
24136            com.google.protobuf.CodedInputStream input,
24137            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
24138            throws java.io.IOException {
24139          return PARSER.parseFrom(input, extensionRegistry);
24140        }
24141    
24142        public static Builder newBuilder() { return Builder.create(); }
24143        public Builder newBuilderForType() { return newBuilder(); }
24144        public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto prototype) {
24145          return newBuilder().mergeFrom(prototype);
24146        }
24147        public Builder toBuilder() { return newBuilder(this); }
24148    
24149        @java.lang.Override
24150        protected Builder newBuilderForType(
24151            com.google.protobuf.GeneratedMessage.BuilderParent parent) {
24152          Builder builder = new Builder(parent);
24153          return builder;
24154        }
24155        /**
24156         * Protobuf type {@code hadoop.hdfs.qjournal.AcceptRecoveryRequestProto}
24157         *
24158         * <pre>
24159         **
24160         * acceptRecovery()
24161         * </pre>
24162         */
24163        public static final class Builder extends
24164            com.google.protobuf.GeneratedMessage.Builder<Builder>
24165           implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProtoOrBuilder {
24166          public static final com.google.protobuf.Descriptors.Descriptor
24167              getDescriptor() {
24168            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_AcceptRecoveryRequestProto_descriptor;
24169          }
24170    
24171          protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
24172              internalGetFieldAccessorTable() {
24173            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_AcceptRecoveryRequestProto_fieldAccessorTable
24174                .ensureFieldAccessorsInitialized(
24175                    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto.Builder.class);
24176          }
24177    
24178          // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto.newBuilder()
24179          private Builder() {
24180            maybeForceBuilderInitialization();
24181          }
24182    
24183          private Builder(
24184              com.google.protobuf.GeneratedMessage.BuilderParent parent) {
24185            super(parent);
24186            maybeForceBuilderInitialization();
24187          }
24188          private void maybeForceBuilderInitialization() {
24189            if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
24190              getReqInfoFieldBuilder();
24191              getStateToAcceptFieldBuilder();
24192            }
24193          }
24194          private static Builder create() {
24195            return new Builder();
24196          }
24197    
24198          public Builder clear() {
24199            super.clear();
24200            if (reqInfoBuilder_ == null) {
24201              reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
24202            } else {
24203              reqInfoBuilder_.clear();
24204            }
24205            bitField0_ = (bitField0_ & ~0x00000001);
24206            if (stateToAcceptBuilder_ == null) {
24207              stateToAccept_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance();
24208            } else {
24209              stateToAcceptBuilder_.clear();
24210            }
24211            bitField0_ = (bitField0_ & ~0x00000002);
24212            fromURL_ = "";
24213            bitField0_ = (bitField0_ & ~0x00000004);
24214            return this;
24215          }
24216    
24217          public Builder clone() {
24218            return create().mergeFrom(buildPartial());
24219          }
24220    
24221          public com.google.protobuf.Descriptors.Descriptor
24222              getDescriptorForType() {
24223            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_AcceptRecoveryRequestProto_descriptor;
24224          }
24225    
24226          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto getDefaultInstanceForType() {
24227            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto.getDefaultInstance();
24228          }
24229    
24230          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto build() {
24231            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto result = buildPartial();
24232            if (!result.isInitialized()) {
24233              throw newUninitializedMessageException(result);
24234            }
24235            return result;
24236          }
24237    
24238          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto buildPartial() {
24239            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto(this);
24240            int from_bitField0_ = bitField0_;
24241            int to_bitField0_ = 0;
24242            if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
24243              to_bitField0_ |= 0x00000001;
24244            }
24245            if (reqInfoBuilder_ == null) {
24246              result.reqInfo_ = reqInfo_;
24247            } else {
24248              result.reqInfo_ = reqInfoBuilder_.build();
24249            }
24250            if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
24251              to_bitField0_ |= 0x00000002;
24252            }
24253            if (stateToAcceptBuilder_ == null) {
24254              result.stateToAccept_ = stateToAccept_;
24255            } else {
24256              result.stateToAccept_ = stateToAcceptBuilder_.build();
24257            }
24258            if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
24259              to_bitField0_ |= 0x00000004;
24260            }
24261            result.fromURL_ = fromURL_;
24262            result.bitField0_ = to_bitField0_;
24263            onBuilt();
24264            return result;
24265          }
24266    
24267          public Builder mergeFrom(com.google.protobuf.Message other) {
24268            if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto) {
24269              return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto)other);
24270            } else {
24271              super.mergeFrom(other);
24272              return this;
24273            }
24274          }
24275    
24276          public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto other) {
24277            if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto.getDefaultInstance()) return this;
24278            if (other.hasReqInfo()) {
24279              mergeReqInfo(other.getReqInfo());
24280            }
24281            if (other.hasStateToAccept()) {
24282              mergeStateToAccept(other.getStateToAccept());
24283            }
24284            if (other.hasFromURL()) {
24285              bitField0_ |= 0x00000004;
24286              fromURL_ = other.fromURL_;
24287              onChanged();
24288            }
24289            this.mergeUnknownFields(other.getUnknownFields());
24290            return this;
24291          }
24292    
24293          public final boolean isInitialized() {
24294            if (!hasReqInfo()) {
24295              
24296              return false;
24297            }
24298            if (!hasStateToAccept()) {
24299              
24300              return false;
24301            }
24302            if (!hasFromURL()) {
24303              
24304              return false;
24305            }
24306            if (!getReqInfo().isInitialized()) {
24307              
24308              return false;
24309            }
24310            if (!getStateToAccept().isInitialized()) {
24311              
24312              return false;
24313            }
24314            return true;
24315          }
24316    
24317          public Builder mergeFrom(
24318              com.google.protobuf.CodedInputStream input,
24319              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
24320              throws java.io.IOException {
24321            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto parsedMessage = null;
24322            try {
24323              parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
24324            } catch (com.google.protobuf.InvalidProtocolBufferException e) {
24325              parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto) e.getUnfinishedMessage();
24326              throw e;
24327            } finally {
24328              if (parsedMessage != null) {
24329                mergeFrom(parsedMessage);
24330              }
24331            }
24332            return this;
24333          }
24334          private int bitField0_;
24335    
24336          // required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1;
24337          private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
24338          private com.google.protobuf.SingleFieldBuilder<
24339              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder> reqInfoBuilder_;
24340          /**
24341           * <code>required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1;</code>
24342           */
24343          public boolean hasReqInfo() {
24344            return ((bitField0_ & 0x00000001) == 0x00000001);
24345          }
24346          /**
24347           * <code>required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1;</code>
24348           */
24349          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo() {
24350            if (reqInfoBuilder_ == null) {
24351              return reqInfo_;
24352            } else {
24353              return reqInfoBuilder_.getMessage();
24354            }
24355          }
24356          /**
24357           * <code>required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1;</code>
24358           */
24359          public Builder setReqInfo(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto value) {
24360            if (reqInfoBuilder_ == null) {
24361              if (value == null) {
24362                throw new NullPointerException();
24363              }
24364              reqInfo_ = value;
24365              onChanged();
24366            } else {
24367              reqInfoBuilder_.setMessage(value);
24368            }
24369            bitField0_ |= 0x00000001;
24370            return this;
24371          }
24372          /**
24373           * <code>required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1;</code>
24374           */
24375          public Builder setReqInfo(
24376              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder builderForValue) {
24377            if (reqInfoBuilder_ == null) {
24378              reqInfo_ = builderForValue.build();
24379              onChanged();
24380            } else {
24381              reqInfoBuilder_.setMessage(builderForValue.build());
24382            }
24383            bitField0_ |= 0x00000001;
24384            return this;
24385          }
24386          /**
24387           * <code>required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1;</code>
24388           */
24389          public Builder mergeReqInfo(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto value) {
24390            if (reqInfoBuilder_ == null) {
24391              if (((bitField0_ & 0x00000001) == 0x00000001) &&
24392                  reqInfo_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance()) {
24393                reqInfo_ =
24394                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.newBuilder(reqInfo_).mergeFrom(value).buildPartial();
24395              } else {
24396                reqInfo_ = value;
24397              }
24398              onChanged();
24399            } else {
24400              reqInfoBuilder_.mergeFrom(value);
24401            }
24402            bitField0_ |= 0x00000001;
24403            return this;
24404          }
24405          /**
24406           * <code>required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1;</code>
24407           */
24408          public Builder clearReqInfo() {
24409            if (reqInfoBuilder_ == null) {
24410              reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
24411              onChanged();
24412            } else {
24413              reqInfoBuilder_.clear();
24414            }
24415            bitField0_ = (bitField0_ & ~0x00000001);
24416            return this;
24417          }
24418          /**
24419           * <code>required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1;</code>
24420           */
24421          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder getReqInfoBuilder() {
24422            bitField0_ |= 0x00000001;
24423            onChanged();
24424            return getReqInfoFieldBuilder().getBuilder();
24425          }
24426          /**
24427           * <code>required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1;</code>
24428           */
24429          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder() {
24430            if (reqInfoBuilder_ != null) {
24431              return reqInfoBuilder_.getMessageOrBuilder();
24432            } else {
24433              return reqInfo_;
24434            }
24435          }
24436          /**
24437           * <code>required .hadoop.hdfs.qjournal.RequestInfoProto reqInfo = 1;</code>
24438           */
24439          private com.google.protobuf.SingleFieldBuilder<
24440              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder> 
24441              getReqInfoFieldBuilder() {
24442            if (reqInfoBuilder_ == null) {
24443              reqInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder<
24444                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder>(
24445                      reqInfo_,
24446                      getParentForChildren(),
24447                      isClean());
24448              reqInfo_ = null;
24449            }
24450            return reqInfoBuilder_;
24451          }
24452    
24453          // required .hadoop.hdfs.qjournal.SegmentStateProto stateToAccept = 2;
24454          private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto stateToAccept_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance();
24455          private com.google.protobuf.SingleFieldBuilder<
24456              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder> stateToAcceptBuilder_;
24457          /**
24458           * <code>required .hadoop.hdfs.qjournal.SegmentStateProto stateToAccept = 2;</code>
24459           *
24460           * <pre>
24461           ** Details on the segment to recover 
24462           * </pre>
24463           */
24464          public boolean hasStateToAccept() {
24465            return ((bitField0_ & 0x00000002) == 0x00000002);
24466          }
24467          /**
24468           * <code>required .hadoop.hdfs.qjournal.SegmentStateProto stateToAccept = 2;</code>
24469           *
24470           * <pre>
24471           ** Details on the segment to recover 
24472           * </pre>
24473           */
24474          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto getStateToAccept() {
24475            if (stateToAcceptBuilder_ == null) {
24476              return stateToAccept_;
24477            } else {
24478              return stateToAcceptBuilder_.getMessage();
24479            }
24480          }
24481          /**
24482           * <code>required .hadoop.hdfs.qjournal.SegmentStateProto stateToAccept = 2;</code>
24483           *
24484           * <pre>
24485           ** Details on the segment to recover 
24486           * </pre>
24487           */
24488          public Builder setStateToAccept(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto value) {
24489            if (stateToAcceptBuilder_ == null) {
24490              if (value == null) {
24491                throw new NullPointerException();
24492              }
24493              stateToAccept_ = value;
24494              onChanged();
24495            } else {
24496              stateToAcceptBuilder_.setMessage(value);
24497            }
24498            bitField0_ |= 0x00000002;
24499            return this;
24500          }
24501          /**
24502           * <code>required .hadoop.hdfs.qjournal.SegmentStateProto stateToAccept = 2;</code>
24503           *
24504           * <pre>
24505           ** Details on the segment to recover 
24506           * </pre>
24507           */
24508          public Builder setStateToAccept(
24509              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder builderForValue) {
24510            if (stateToAcceptBuilder_ == null) {
24511              stateToAccept_ = builderForValue.build();
24512              onChanged();
24513            } else {
24514              stateToAcceptBuilder_.setMessage(builderForValue.build());
24515            }
24516            bitField0_ |= 0x00000002;
24517            return this;
24518          }
24519          /**
24520           * <code>required .hadoop.hdfs.qjournal.SegmentStateProto stateToAccept = 2;</code>
24521           *
24522           * <pre>
24523           ** Details on the segment to recover 
24524           * </pre>
24525           */
24526          public Builder mergeStateToAccept(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto value) {
24527            if (stateToAcceptBuilder_ == null) {
24528              if (((bitField0_ & 0x00000002) == 0x00000002) &&
24529                  stateToAccept_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance()) {
24530                stateToAccept_ =
24531                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.newBuilder(stateToAccept_).mergeFrom(value).buildPartial();
24532              } else {
24533                stateToAccept_ = value;
24534              }
24535              onChanged();
24536            } else {
24537              stateToAcceptBuilder_.mergeFrom(value);
24538            }
24539            bitField0_ |= 0x00000002;
24540            return this;
24541          }
24542          /**
24543           * <code>required .hadoop.hdfs.qjournal.SegmentStateProto stateToAccept = 2;</code>
24544           *
24545           * <pre>
24546           ** Details on the segment to recover 
24547           * </pre>
24548           */
24549          public Builder clearStateToAccept() {
24550            if (stateToAcceptBuilder_ == null) {
24551              stateToAccept_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance();
24552              onChanged();
24553            } else {
24554              stateToAcceptBuilder_.clear();
24555            }
24556            bitField0_ = (bitField0_ & ~0x00000002);
24557            return this;
24558          }
24559          /**
24560           * <code>required .hadoop.hdfs.qjournal.SegmentStateProto stateToAccept = 2;</code>
24561           *
24562           * <pre>
24563           ** Details on the segment to recover 
24564           * </pre>
24565           */
24566          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder getStateToAcceptBuilder() {
24567            bitField0_ |= 0x00000002;
24568            onChanged();
24569            return getStateToAcceptFieldBuilder().getBuilder();
24570          }
24571          /**
24572           * <code>required .hadoop.hdfs.qjournal.SegmentStateProto stateToAccept = 2;</code>
24573           *
24574           * <pre>
24575           ** Details on the segment to recover 
24576           * </pre>
24577           */
24578          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder getStateToAcceptOrBuilder() {
24579            if (stateToAcceptBuilder_ != null) {
24580              return stateToAcceptBuilder_.getMessageOrBuilder();
24581            } else {
24582              return stateToAccept_;
24583            }
24584          }
24585          /**
24586           * <code>required .hadoop.hdfs.qjournal.SegmentStateProto stateToAccept = 2;</code>
24587           *
24588           * <pre>
24589           ** Details on the segment to recover 
24590           * </pre>
24591           */
24592          private com.google.protobuf.SingleFieldBuilder<
24593              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder> 
24594              getStateToAcceptFieldBuilder() {
24595            if (stateToAcceptBuilder_ == null) {
24596              stateToAcceptBuilder_ = new com.google.protobuf.SingleFieldBuilder<
24597                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder>(
24598                      stateToAccept_,
24599                      getParentForChildren(),
24600                      isClean());
24601              stateToAccept_ = null;
24602            }
24603            return stateToAcceptBuilder_;
24604          }
24605    
24606          // required string fromURL = 3;
24607          private java.lang.Object fromURL_ = "";
24608          /**
24609           * <code>required string fromURL = 3;</code>
24610           *
24611           * <pre>
24612           ** The URL from which the log may be copied 
24613           * </pre>
24614           */
24615          public boolean hasFromURL() {
24616            return ((bitField0_ & 0x00000004) == 0x00000004);
24617          }
24618          /**
24619           * <code>required string fromURL = 3;</code>
24620           *
24621           * <pre>
24622           ** The URL from which the log may be copied 
24623           * </pre>
24624           */
24625          public java.lang.String getFromURL() {
24626            java.lang.Object ref = fromURL_;
24627            if (!(ref instanceof java.lang.String)) {
24628              java.lang.String s = ((com.google.protobuf.ByteString) ref)
24629                  .toStringUtf8();
24630              fromURL_ = s;
24631              return s;
24632            } else {
24633              return (java.lang.String) ref;
24634            }
24635          }
24636          /**
24637           * <code>required string fromURL = 3;</code>
24638           *
24639           * <pre>
24640           ** The URL from which the log may be copied 
24641           * </pre>
24642           */
24643          public com.google.protobuf.ByteString
24644              getFromURLBytes() {
24645            java.lang.Object ref = fromURL_;
24646            if (ref instanceof java.lang.String) {
24647              com.google.protobuf.ByteString b = 
24648                  com.google.protobuf.ByteString.copyFromUtf8(
24649                      (java.lang.String) ref);
24650              fromURL_ = b;
24651              return b;
24652            } else {
24653              return (com.google.protobuf.ByteString) ref;
24654            }
24655          }
24656          /**
24657           * <code>required string fromURL = 3;</code>
24658           *
24659           * <pre>
24660           ** The URL from which the log may be copied 
24661           * </pre>
24662           */
24663          public Builder setFromURL(
24664              java.lang.String value) {
24665            if (value == null) {
24666              throw new NullPointerException();
24667            }
24668            bitField0_ |= 0x00000004;
24669            fromURL_ = value;
24670            onChanged();
24671            return this;
24672          }
24673          /**
24674           * <code>required string fromURL = 3;</code>
24675           *
24676           * <pre>
24677           ** The URL from which the log may be copied 
24678           * </pre>
24679           */
24680          public Builder clearFromURL() {
24681            bitField0_ = (bitField0_ & ~0x00000004);
24682            fromURL_ = getDefaultInstance().getFromURL();
24683            onChanged();
24684            return this;
24685          }
24686          /**
24687           * <code>required string fromURL = 3;</code>
24688           *
24689           * <pre>
24690           ** The URL from which the log may be copied 
24691           * </pre>
24692           */
24693          public Builder setFromURLBytes(
24694              com.google.protobuf.ByteString value) {
24695            if (value == null) {
24696              throw new NullPointerException();
24697            }
24698            bitField0_ |= 0x00000004;
24699            fromURL_ = value;
24700            onChanged();
24701            return this;
24702          }
24703    
24704          // @@protoc_insertion_point(builder_scope:hadoop.hdfs.qjournal.AcceptRecoveryRequestProto)
24705        }
24706    
24707        static {
24708          defaultInstance = new AcceptRecoveryRequestProto(true);
24709          defaultInstance.initFields();
24710        }
24711    
24712        // @@protoc_insertion_point(class_scope:hadoop.hdfs.qjournal.AcceptRecoveryRequestProto)
24713      }
24714    
24715      public interface AcceptRecoveryResponseProtoOrBuilder
24716          extends com.google.protobuf.MessageOrBuilder {
24717      }
24718      /**
24719       * Protobuf type {@code hadoop.hdfs.qjournal.AcceptRecoveryResponseProto}
24720       */
24721      public static final class AcceptRecoveryResponseProto extends
24722          com.google.protobuf.GeneratedMessage
24723          implements AcceptRecoveryResponseProtoOrBuilder {
24724        // Use AcceptRecoveryResponseProto.newBuilder() to construct.
24725        private AcceptRecoveryResponseProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
24726          super(builder);
24727          this.unknownFields = builder.getUnknownFields();
24728        }
24729        private AcceptRecoveryResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
24730    
24731        private static final AcceptRecoveryResponseProto defaultInstance;
24732        public static AcceptRecoveryResponseProto getDefaultInstance() {
24733          return defaultInstance;
24734        }
24735    
24736        public AcceptRecoveryResponseProto getDefaultInstanceForType() {
24737          return defaultInstance;
24738        }
24739    
24740        private final com.google.protobuf.UnknownFieldSet unknownFields;
24741        @java.lang.Override
24742        public final com.google.protobuf.UnknownFieldSet
24743            getUnknownFields() {
24744          return this.unknownFields;
24745        }
24746        private AcceptRecoveryResponseProto(
24747            com.google.protobuf.CodedInputStream input,
24748            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
24749            throws com.google.protobuf.InvalidProtocolBufferException {
24750          initFields();
24751          com.google.protobuf.UnknownFieldSet.Builder unknownFields =
24752              com.google.protobuf.UnknownFieldSet.newBuilder();
24753          try {
24754            boolean done = false;
24755            while (!done) {
24756              int tag = input.readTag();
24757              switch (tag) {
24758                case 0:
24759                  done = true;
24760                  break;
24761                default: {
24762                  if (!parseUnknownField(input, unknownFields,
24763                                         extensionRegistry, tag)) {
24764                    done = true;
24765                  }
24766                  break;
24767                }
24768              }
24769            }
24770          } catch (com.google.protobuf.InvalidProtocolBufferException e) {
24771            throw e.setUnfinishedMessage(this);
24772          } catch (java.io.IOException e) {
24773            throw new com.google.protobuf.InvalidProtocolBufferException(
24774                e.getMessage()).setUnfinishedMessage(this);
24775          } finally {
24776            this.unknownFields = unknownFields.build();
24777            makeExtensionsImmutable();
24778          }
24779        }
24780        public static final com.google.protobuf.Descriptors.Descriptor
24781            getDescriptor() {
24782          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_AcceptRecoveryResponseProto_descriptor;
24783        }
24784    
24785        protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
24786            internalGetFieldAccessorTable() {
24787          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_AcceptRecoveryResponseProto_fieldAccessorTable
24788              .ensureFieldAccessorsInitialized(
24789                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto.Builder.class);
24790        }
24791    
24792        public static com.google.protobuf.Parser<AcceptRecoveryResponseProto> PARSER =
24793            new com.google.protobuf.AbstractParser<AcceptRecoveryResponseProto>() {
24794          public AcceptRecoveryResponseProto parsePartialFrom(
24795              com.google.protobuf.CodedInputStream input,
24796              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
24797              throws com.google.protobuf.InvalidProtocolBufferException {
24798            return new AcceptRecoveryResponseProto(input, extensionRegistry);
24799          }
24800        };
24801    
24802        @java.lang.Override
24803        public com.google.protobuf.Parser<AcceptRecoveryResponseProto> getParserForType() {
24804          return PARSER;
24805        }
24806    
24807        private void initFields() {
24808        }
24809        private byte memoizedIsInitialized = -1;
24810        public final boolean isInitialized() {
24811          byte isInitialized = memoizedIsInitialized;
24812          if (isInitialized != -1) return isInitialized == 1;
24813    
24814          memoizedIsInitialized = 1;
24815          return true;
24816        }
24817    
24818        public void writeTo(com.google.protobuf.CodedOutputStream output)
24819                            throws java.io.IOException {
24820          getSerializedSize();
24821          getUnknownFields().writeTo(output);
24822        }
24823    
24824        private int memoizedSerializedSize = -1;
24825        public int getSerializedSize() {
24826          int size = memoizedSerializedSize;
24827          if (size != -1) return size;
24828    
24829          size = 0;
24830          size += getUnknownFields().getSerializedSize();
24831          memoizedSerializedSize = size;
24832          return size;
24833        }
24834    
24835        private static final long serialVersionUID = 0L;
24836        @java.lang.Override
24837        protected java.lang.Object writeReplace()
24838            throws java.io.ObjectStreamException {
24839          return super.writeReplace();
24840        }
24841    
24842        @java.lang.Override
24843        public boolean equals(final java.lang.Object obj) {
24844          if (obj == this) {
        return true;
24846          }
24847          if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto)) {
24848            return super.equals(obj);
24849          }
24850          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto) obj;
24851    
24852          boolean result = true;
24853          result = result &&
24854              getUnknownFields().equals(other.getUnknownFields());
24855          return result;
24856        }
24857    
24858        private int memoizedHashCode = 0;
24859        @java.lang.Override
24860        public int hashCode() {
24861          if (memoizedHashCode != 0) {
24862            return memoizedHashCode;
24863          }
24864          int hash = 41;
24865          hash = (19 * hash) + getDescriptorForType().hashCode();
24866          hash = (29 * hash) + getUnknownFields().hashCode();
24867          memoizedHashCode = hash;
24868          return hash;
24869        }
24870    
24871        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto parseFrom(
24872            com.google.protobuf.ByteString data)
24873            throws com.google.protobuf.InvalidProtocolBufferException {
24874          return PARSER.parseFrom(data);
24875        }
24876        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto parseFrom(
24877            com.google.protobuf.ByteString data,
24878            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
24879            throws com.google.protobuf.InvalidProtocolBufferException {
24880          return PARSER.parseFrom(data, extensionRegistry);
24881        }
24882        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto parseFrom(byte[] data)
24883            throws com.google.protobuf.InvalidProtocolBufferException {
24884          return PARSER.parseFrom(data);
24885        }
24886        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto parseFrom(
24887            byte[] data,
24888            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
24889            throws com.google.protobuf.InvalidProtocolBufferException {
24890          return PARSER.parseFrom(data, extensionRegistry);
24891        }
24892        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto parseFrom(java.io.InputStream input)
24893            throws java.io.IOException {
24894          return PARSER.parseFrom(input);
24895        }
24896        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto parseFrom(
24897            java.io.InputStream input,
24898            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
24899            throws java.io.IOException {
24900          return PARSER.parseFrom(input, extensionRegistry);
24901        }
24902        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto parseDelimitedFrom(java.io.InputStream input)
24903            throws java.io.IOException {
24904          return PARSER.parseDelimitedFrom(input);
24905        }
24906        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto parseDelimitedFrom(
24907            java.io.InputStream input,
24908            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
24909            throws java.io.IOException {
24910          return PARSER.parseDelimitedFrom(input, extensionRegistry);
24911        }
24912        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto parseFrom(
24913            com.google.protobuf.CodedInputStream input)
24914            throws java.io.IOException {
24915          return PARSER.parseFrom(input);
24916        }
24917        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto parseFrom(
24918            com.google.protobuf.CodedInputStream input,
24919            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
24920            throws java.io.IOException {
24921          return PARSER.parseFrom(input, extensionRegistry);
24922        }
24923    
24924        public static Builder newBuilder() { return Builder.create(); }
24925        public Builder newBuilderForType() { return newBuilder(); }
24926        public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto prototype) {
24927          return newBuilder().mergeFrom(prototype);
24928        }
24929        public Builder toBuilder() { return newBuilder(this); }
24930    
24931        @java.lang.Override
24932        protected Builder newBuilderForType(
24933            com.google.protobuf.GeneratedMessage.BuilderParent parent) {
24934          Builder builder = new Builder(parent);
24935          return builder;
24936        }
24937        /**
24938         * Protobuf type {@code hadoop.hdfs.qjournal.AcceptRecoveryResponseProto}
24939         */
24940        public static final class Builder extends
24941            com.google.protobuf.GeneratedMessage.Builder<Builder>
24942           implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProtoOrBuilder {
24943          public static final com.google.protobuf.Descriptors.Descriptor
24944              getDescriptor() {
24945            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_AcceptRecoveryResponseProto_descriptor;
24946          }
24947    
24948          protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
24949              internalGetFieldAccessorTable() {
24950            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_AcceptRecoveryResponseProto_fieldAccessorTable
24951                .ensureFieldAccessorsInitialized(
24952                    org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto.class, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto.Builder.class);
24953          }
24954    
24955          // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto.newBuilder()
24956          private Builder() {
24957            maybeForceBuilderInitialization();
24958          }
24959    
24960          private Builder(
24961              com.google.protobuf.GeneratedMessage.BuilderParent parent) {
24962            super(parent);
24963            maybeForceBuilderInitialization();
24964          }
24965          private void maybeForceBuilderInitialization() {
24966            if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
24967            }
24968          }
24969          private static Builder create() {
24970            return new Builder();
24971          }
24972    
24973          public Builder clear() {
24974            super.clear();
24975            return this;
24976          }
24977    
24978          public Builder clone() {
24979            return create().mergeFrom(buildPartial());
24980          }
24981    
24982          public com.google.protobuf.Descriptors.Descriptor
24983              getDescriptorForType() {
24984            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_qjournal_AcceptRecoveryResponseProto_descriptor;
24985          }
24986    
24987          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto getDefaultInstanceForType() {
24988            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto.getDefaultInstance();
24989          }
24990    
24991          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto build() {
24992            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto result = buildPartial();
24993            if (!result.isInitialized()) {
24994              throw newUninitializedMessageException(result);
24995            }
24996            return result;
24997          }
24998    
24999          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto buildPartial() {
25000            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto(this);
25001            onBuilt();
25002            return result;
25003          }
25004    
25005          public Builder mergeFrom(com.google.protobuf.Message other) {
25006            if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto) {
25007              return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto)other);
25008            } else {
25009              super.mergeFrom(other);
25010              return this;
25011            }
25012          }
25013    
25014          public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto other) {
25015            if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto.getDefaultInstance()) return this;
25016            this.mergeUnknownFields(other.getUnknownFields());
25017            return this;
25018          }
25019    
25020          public final boolean isInitialized() {
25021            return true;
25022          }
25023    
25024          public Builder mergeFrom(
25025              com.google.protobuf.CodedInputStream input,
25026              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
25027              throws java.io.IOException {
25028            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto parsedMessage = null;
25029            try {
25030              parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
25031            } catch (com.google.protobuf.InvalidProtocolBufferException e) {
25032              parsedMessage = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto) e.getUnfinishedMessage();
25033              throw e;
25034            } finally {
25035              if (parsedMessage != null) {
25036                mergeFrom(parsedMessage);
25037              }
25038            }
25039            return this;
25040          }
25041    
25042          // @@protoc_insertion_point(builder_scope:hadoop.hdfs.qjournal.AcceptRecoveryResponseProto)
25043        }
25044    
25045        static {
25046          defaultInstance = new AcceptRecoveryResponseProto(true);
25047          defaultInstance.initFields();
25048        }
25049    
25050        // @@protoc_insertion_point(class_scope:hadoop.hdfs.qjournal.AcceptRecoveryResponseProto)
25051      }
25052    
25053      /**
25054       * Protobuf service {@code hadoop.hdfs.qjournal.QJournalProtocolService}
25055       *
25056       * <pre>
25057       **
25058       * Protocol used to journal edits to a JournalNode.
25059       * See the request and response for details of rpc call.
25060       * </pre>
25061       */
25062      public static abstract class QJournalProtocolService
25063          implements com.google.protobuf.Service {
25064        protected QJournalProtocolService() {}
25065    
25066        public interface Interface {
25067          /**
25068           * <code>rpc isFormatted(.hadoop.hdfs.qjournal.IsFormattedRequestProto) returns (.hadoop.hdfs.qjournal.IsFormattedResponseProto);</code>
25069           */
25070          public abstract void isFormatted(
25071              com.google.protobuf.RpcController controller,
25072              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto request,
25073              com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto> done);
25074    
25075          /**
25076           * <code>rpc discardSegments(.hadoop.hdfs.qjournal.DiscardSegmentsRequestProto) returns (.hadoop.hdfs.qjournal.DiscardSegmentsResponseProto);</code>
25077           */
25078          public abstract void discardSegments(
25079              com.google.protobuf.RpcController controller,
25080              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsRequestProto request,
25081              com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsResponseProto> done);
25082    
25083          /**
25084           * <code>rpc getJournalCTime(.hadoop.hdfs.qjournal.GetJournalCTimeRequestProto) returns (.hadoop.hdfs.qjournal.GetJournalCTimeResponseProto);</code>
25085           */
25086          public abstract void getJournalCTime(
25087              com.google.protobuf.RpcController controller,
25088              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeRequestProto request,
25089              com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeResponseProto> done);
25090    
25091          /**
25092           * <code>rpc doPreUpgrade(.hadoop.hdfs.qjournal.DoPreUpgradeRequestProto) returns (.hadoop.hdfs.qjournal.DoPreUpgradeResponseProto);</code>
25093           */
25094          public abstract void doPreUpgrade(
25095              com.google.protobuf.RpcController controller,
25096              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeRequestProto request,
25097              com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeResponseProto> done);
25098    
25099          /**
25100           * <code>rpc doUpgrade(.hadoop.hdfs.qjournal.DoUpgradeRequestProto) returns (.hadoop.hdfs.qjournal.DoUpgradeResponseProto);</code>
25101           */
25102          public abstract void doUpgrade(
25103              com.google.protobuf.RpcController controller,
25104              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeRequestProto request,
25105              com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeResponseProto> done);
25106    
25107          /**
25108           * <code>rpc doFinalize(.hadoop.hdfs.qjournal.DoFinalizeRequestProto) returns (.hadoop.hdfs.qjournal.DoFinalizeResponseProto);</code>
25109           */
25110          public abstract void doFinalize(
25111              com.google.protobuf.RpcController controller,
25112              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeRequestProto request,
25113              com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeResponseProto> done);
25114    
25115          /**
25116           * <code>rpc canRollBack(.hadoop.hdfs.qjournal.CanRollBackRequestProto) returns (.hadoop.hdfs.qjournal.CanRollBackResponseProto);</code>
25117           */
25118          public abstract void canRollBack(
25119              com.google.protobuf.RpcController controller,
25120              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackRequestProto request,
25121              com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackResponseProto> done);
25122    
25123          /**
25124           * <code>rpc doRollback(.hadoop.hdfs.qjournal.DoRollbackRequestProto) returns (.hadoop.hdfs.qjournal.DoRollbackResponseProto);</code>
25125           */
25126          public abstract void doRollback(
25127              com.google.protobuf.RpcController controller,
25128              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackRequestProto request,
25129              com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackResponseProto> done);
25130    
25131          /**
25132           * <code>rpc getJournalState(.hadoop.hdfs.qjournal.GetJournalStateRequestProto) returns (.hadoop.hdfs.qjournal.GetJournalStateResponseProto);</code>
25133           */
25134          public abstract void getJournalState(
25135              com.google.protobuf.RpcController controller,
25136              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto request,
25137              com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto> done);
25138    
25139          /**
25140           * <code>rpc newEpoch(.hadoop.hdfs.qjournal.NewEpochRequestProto) returns (.hadoop.hdfs.qjournal.NewEpochResponseProto);</code>
25141           */
25142          public abstract void newEpoch(
25143              com.google.protobuf.RpcController controller,
25144              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto request,
25145              com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto> done);
25146    
25147          /**
25148           * <code>rpc format(.hadoop.hdfs.qjournal.FormatRequestProto) returns (.hadoop.hdfs.qjournal.FormatResponseProto);</code>
25149           */
25150          public abstract void format(
25151              com.google.protobuf.RpcController controller,
25152              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto request,
25153              com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto> done);
25154    
25155          /**
25156           * <code>rpc journal(.hadoop.hdfs.qjournal.JournalRequestProto) returns (.hadoop.hdfs.qjournal.JournalResponseProto);</code>
25157           */
25158          public abstract void journal(
25159              com.google.protobuf.RpcController controller,
25160              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto request,
25161              com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto> done);
25162    
25163          /**
25164           * <code>rpc heartbeat(.hadoop.hdfs.qjournal.HeartbeatRequestProto) returns (.hadoop.hdfs.qjournal.HeartbeatResponseProto);</code>
25165           */
25166          public abstract void heartbeat(
25167              com.google.protobuf.RpcController controller,
25168              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto request,
25169              com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto> done);
25170    
25171          /**
25172           * <code>rpc startLogSegment(.hadoop.hdfs.qjournal.StartLogSegmentRequestProto) returns (.hadoop.hdfs.qjournal.StartLogSegmentResponseProto);</code>
25173           */
25174          public abstract void startLogSegment(
25175              com.google.protobuf.RpcController controller,
25176              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto request,
25177              com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto> done);
25178    
25179          /**
25180           * <code>rpc finalizeLogSegment(.hadoop.hdfs.qjournal.FinalizeLogSegmentRequestProto) returns (.hadoop.hdfs.qjournal.FinalizeLogSegmentResponseProto);</code>
25181           */
25182          public abstract void finalizeLogSegment(
25183              com.google.protobuf.RpcController controller,
25184              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto request,
25185              com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto> done);
25186    
25187          /**
25188           * <code>rpc purgeLogs(.hadoop.hdfs.qjournal.PurgeLogsRequestProto) returns (.hadoop.hdfs.qjournal.PurgeLogsResponseProto);</code>
25189           */
25190          public abstract void purgeLogs(
25191              com.google.protobuf.RpcController controller,
25192              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto request,
25193              com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto> done);
25194    
25195          /**
25196           * <code>rpc getEditLogManifest(.hadoop.hdfs.qjournal.GetEditLogManifestRequestProto) returns (.hadoop.hdfs.qjournal.GetEditLogManifestResponseProto);</code>
25197           */
25198          public abstract void getEditLogManifest(
25199              com.google.protobuf.RpcController controller,
25200              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto request,
25201              com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto> done);
25202    
25203          /**
25204           * <code>rpc prepareRecovery(.hadoop.hdfs.qjournal.PrepareRecoveryRequestProto) returns (.hadoop.hdfs.qjournal.PrepareRecoveryResponseProto);</code>
25205           */
25206          public abstract void prepareRecovery(
25207              com.google.protobuf.RpcController controller,
25208              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto request,
25209              com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto> done);
25210    
25211          /**
25212           * <code>rpc acceptRecovery(.hadoop.hdfs.qjournal.AcceptRecoveryRequestProto) returns (.hadoop.hdfs.qjournal.AcceptRecoveryResponseProto);</code>
25213           */
25214          public abstract void acceptRecovery(
25215              com.google.protobuf.RpcController controller,
25216              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto request,
25217              com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto> done);
25218    
25219        }
25220    
25221        public static com.google.protobuf.Service newReflectiveService(
25222            final Interface impl) {
25223          return new QJournalProtocolService() {
25224            @java.lang.Override
25225            public  void isFormatted(
25226                com.google.protobuf.RpcController controller,
25227                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto request,
25228                com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto> done) {
25229              impl.isFormatted(controller, request, done);
25230            }
25231    
25232            @java.lang.Override
25233            public  void discardSegments(
25234                com.google.protobuf.RpcController controller,
25235                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsRequestProto request,
25236                com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsResponseProto> done) {
25237              impl.discardSegments(controller, request, done);
25238            }
25239    
25240            @java.lang.Override
25241            public  void getJournalCTime(
25242                com.google.protobuf.RpcController controller,
25243                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeRequestProto request,
25244                com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeResponseProto> done) {
25245              impl.getJournalCTime(controller, request, done);
25246            }
25247    
25248            @java.lang.Override
25249            public  void doPreUpgrade(
25250                com.google.protobuf.RpcController controller,
25251                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeRequestProto request,
25252                com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeResponseProto> done) {
25253              impl.doPreUpgrade(controller, request, done);
25254            }
25255    
25256            @java.lang.Override
25257            public  void doUpgrade(
25258                com.google.protobuf.RpcController controller,
25259                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeRequestProto request,
25260                com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeResponseProto> done) {
25261              impl.doUpgrade(controller, request, done);
25262            }
25263    
25264            @java.lang.Override
25265            public  void doFinalize(
25266                com.google.protobuf.RpcController controller,
25267                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeRequestProto request,
25268                com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeResponseProto> done) {
25269              impl.doFinalize(controller, request, done);
25270            }
25271    
25272            @java.lang.Override
25273            public  void canRollBack(
25274                com.google.protobuf.RpcController controller,
25275                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackRequestProto request,
25276                com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackResponseProto> done) {
25277              impl.canRollBack(controller, request, done);
25278            }
25279    
25280            @java.lang.Override
25281            public  void doRollback(
25282                com.google.protobuf.RpcController controller,
25283                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackRequestProto request,
25284                com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackResponseProto> done) {
25285              impl.doRollback(controller, request, done);
25286            }
25287    
25288            @java.lang.Override
25289            public  void getJournalState(
25290                com.google.protobuf.RpcController controller,
25291                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto request,
25292                com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto> done) {
25293              impl.getJournalState(controller, request, done);
25294            }
25295    
25296            @java.lang.Override
25297            public  void newEpoch(
25298                com.google.protobuf.RpcController controller,
25299                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto request,
25300                com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto> done) {
25301              impl.newEpoch(controller, request, done);
25302            }
25303    
25304            @java.lang.Override
25305            public  void format(
25306                com.google.protobuf.RpcController controller,
25307                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto request,
25308                com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto> done) {
25309              impl.format(controller, request, done);
25310            }
25311    
25312            @java.lang.Override
25313            public  void journal(
25314                com.google.protobuf.RpcController controller,
25315                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto request,
25316                com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto> done) {
25317              impl.journal(controller, request, done);
25318            }
25319    
25320            @java.lang.Override
25321            public  void heartbeat(
25322                com.google.protobuf.RpcController controller,
25323                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto request,
25324                com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto> done) {
25325              impl.heartbeat(controller, request, done);
25326            }
25327    
25328            @java.lang.Override
25329            public  void startLogSegment(
25330                com.google.protobuf.RpcController controller,
25331                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto request,
25332                com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto> done) {
25333              impl.startLogSegment(controller, request, done);
25334            }
25335    
25336            @java.lang.Override
25337            public  void finalizeLogSegment(
25338                com.google.protobuf.RpcController controller,
25339                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto request,
25340                com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto> done) {
25341              impl.finalizeLogSegment(controller, request, done);
25342            }
25343    
25344            @java.lang.Override
25345            public  void purgeLogs(
25346                com.google.protobuf.RpcController controller,
25347                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto request,
25348                com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto> done) {
25349              impl.purgeLogs(controller, request, done);
25350            }
25351    
25352            @java.lang.Override
25353            public  void getEditLogManifest(
25354                com.google.protobuf.RpcController controller,
25355                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto request,
25356                com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto> done) {
25357              impl.getEditLogManifest(controller, request, done);
25358            }
25359    
25360            @java.lang.Override
25361            public  void prepareRecovery(
25362                com.google.protobuf.RpcController controller,
25363                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto request,
25364                com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto> done) {
25365              impl.prepareRecovery(controller, request, done);
25366            }
25367    
25368            @java.lang.Override
25369            public  void acceptRecovery(
25370                com.google.protobuf.RpcController controller,
25371                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto request,
25372                com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto> done) {
25373              impl.acceptRecovery(controller, request, done);
25374            }
25375    
25376          };
25377        }
25378    
25379        public static com.google.protobuf.BlockingService
25380            newReflectiveBlockingService(final BlockingInterface impl) {
25381          return new com.google.protobuf.BlockingService() {
25382            public final com.google.protobuf.Descriptors.ServiceDescriptor
25383                getDescriptorForType() {
25384              return getDescriptor();
25385            }
25386    
25387            public final com.google.protobuf.Message callBlockingMethod(
25388                com.google.protobuf.Descriptors.MethodDescriptor method,
25389                com.google.protobuf.RpcController controller,
25390                com.google.protobuf.Message request)
25391                throws com.google.protobuf.ServiceException {
25392              if (method.getService() != getDescriptor()) {
25393                throw new java.lang.IllegalArgumentException(
25394                  "Service.callBlockingMethod() given method descriptor for " +
25395                  "wrong service type.");
25396              }
25397              switch(method.getIndex()) {
25398                case 0:
25399                  return impl.isFormatted(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto)request);
25400                case 1:
25401                  return impl.discardSegments(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsRequestProto)request);
25402                case 2:
25403                  return impl.getJournalCTime(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeRequestProto)request);
25404                case 3:
25405                  return impl.doPreUpgrade(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeRequestProto)request);
25406                case 4:
25407                  return impl.doUpgrade(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeRequestProto)request);
25408                case 5:
25409                  return impl.doFinalize(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeRequestProto)request);
25410                case 6:
25411                  return impl.canRollBack(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackRequestProto)request);
25412                case 7:
25413                  return impl.doRollback(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackRequestProto)request);
25414                case 8:
25415                  return impl.getJournalState(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto)request);
25416                case 9:
25417                  return impl.newEpoch(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto)request);
25418                case 10:
25419                  return impl.format(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto)request);
25420                case 11:
25421                  return impl.journal(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto)request);
25422                case 12:
25423                  return impl.heartbeat(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto)request);
25424                case 13:
25425                  return impl.startLogSegment(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto)request);
25426                case 14:
25427                  return impl.finalizeLogSegment(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto)request);
25428                case 15:
25429                  return impl.purgeLogs(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto)request);
25430                case 16:
25431                  return impl.getEditLogManifest(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto)request);
25432                case 17:
25433                  return impl.prepareRecovery(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto)request);
25434                case 18:
25435                  return impl.acceptRecovery(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto)request);
25436                default:
25437                  throw new java.lang.AssertionError("Can't get here.");
25438              }
25439            }
25440    
25441            public final com.google.protobuf.Message
25442                getRequestPrototype(
25443                com.google.protobuf.Descriptors.MethodDescriptor method) {
25444              if (method.getService() != getDescriptor()) {
25445                throw new java.lang.IllegalArgumentException(
25446                  "Service.getRequestPrototype() given method " +
25447                  "descriptor for wrong service type.");
25448              }
25449              switch(method.getIndex()) {
25450                case 0:
25451                  return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto.getDefaultInstance();
25452                case 1:
25453                  return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsRequestProto.getDefaultInstance();
25454                case 2:
25455                  return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeRequestProto.getDefaultInstance();
25456                case 3:
25457                  return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeRequestProto.getDefaultInstance();
25458                case 4:
25459                  return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeRequestProto.getDefaultInstance();
25460                case 5:
25461                  return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeRequestProto.getDefaultInstance();
25462                case 6:
25463                  return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackRequestProto.getDefaultInstance();
25464                case 7:
25465                  return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackRequestProto.getDefaultInstance();
25466                case 8:
25467                  return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto.getDefaultInstance();
25468                case 9:
25469                  return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto.getDefaultInstance();
25470                case 10:
25471                  return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto.getDefaultInstance();
25472                case 11:
25473                  return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto.getDefaultInstance();
25474                case 12:
25475                  return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto.getDefaultInstance();
25476                case 13:
25477                  return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto.getDefaultInstance();
25478                case 14:
25479                  return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto.getDefaultInstance();
25480                case 15:
25481                  return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto.getDefaultInstance();
25482                case 16:
25483                  return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto.getDefaultInstance();
25484                case 17:
25485                  return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto.getDefaultInstance();
25486                case 18:
25487                  return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto.getDefaultInstance();
25488                default:
25489                  throw new java.lang.AssertionError("Can't get here.");
25490              }
25491            }
25492    
25493            public final com.google.protobuf.Message
25494                getResponsePrototype(
25495                com.google.protobuf.Descriptors.MethodDescriptor method) {
25496              if (method.getService() != getDescriptor()) {
25497                throw new java.lang.IllegalArgumentException(
25498                  "Service.getResponsePrototype() given method " +
25499                  "descriptor for wrong service type.");
25500              }
25501              switch(method.getIndex()) {
25502                case 0:
25503                  return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto.getDefaultInstance();
25504                case 1:
25505                  return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsResponseProto.getDefaultInstance();
25506                case 2:
25507                  return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeResponseProto.getDefaultInstance();
25508                case 3:
25509                  return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeResponseProto.getDefaultInstance();
25510                case 4:
25511                  return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeResponseProto.getDefaultInstance();
25512                case 5:
25513                  return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeResponseProto.getDefaultInstance();
25514                case 6:
25515                  return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackResponseProto.getDefaultInstance();
25516                case 7:
25517                  return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackResponseProto.getDefaultInstance();
25518                case 8:
25519                  return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto.getDefaultInstance();
25520                case 9:
25521                  return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto.getDefaultInstance();
25522                case 10:
25523                  return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto.getDefaultInstance();
25524                case 11:
25525                  return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto.getDefaultInstance();
25526                case 12:
25527                  return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto.getDefaultInstance();
25528                case 13:
25529                  return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto.getDefaultInstance();
25530                case 14:
25531                  return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto.getDefaultInstance();
25532                case 15:
25533                  return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto.getDefaultInstance();
25534                case 16:
25535                  return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto.getDefaultInstance();
25536                case 17:
25537                  return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto.getDefaultInstance();
25538                case 18:
25539                  return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto.getDefaultInstance();
25540                default:
25541                  throw new java.lang.AssertionError("Can't get here.");
25542              }
25543            }
25544    
25545          };
25546        }
25547    
25548        /**
25549         * <code>rpc isFormatted(.hadoop.hdfs.qjournal.IsFormattedRequestProto) returns (.hadoop.hdfs.qjournal.IsFormattedResponseProto);</code>
25550         */
25551        public abstract void isFormatted(
25552            com.google.protobuf.RpcController controller,
25553            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto request,
25554            com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto> done);
25555    
25556        /**
25557         * <code>rpc discardSegments(.hadoop.hdfs.qjournal.DiscardSegmentsRequestProto) returns (.hadoop.hdfs.qjournal.DiscardSegmentsResponseProto);</code>
25558         */
25559        public abstract void discardSegments(
25560            com.google.protobuf.RpcController controller,
25561            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsRequestProto request,
25562            com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsResponseProto> done);
25563    
25564        /**
25565         * <code>rpc getJournalCTime(.hadoop.hdfs.qjournal.GetJournalCTimeRequestProto) returns (.hadoop.hdfs.qjournal.GetJournalCTimeResponseProto);</code>
25566         */
25567        public abstract void getJournalCTime(
25568            com.google.protobuf.RpcController controller,
25569            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeRequestProto request,
25570            com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeResponseProto> done);
25571    
25572        /**
25573         * <code>rpc doPreUpgrade(.hadoop.hdfs.qjournal.DoPreUpgradeRequestProto) returns (.hadoop.hdfs.qjournal.DoPreUpgradeResponseProto);</code>
25574         */
25575        public abstract void doPreUpgrade(
25576            com.google.protobuf.RpcController controller,
25577            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeRequestProto request,
25578            com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeResponseProto> done);
25579    
25580        /**
25581         * <code>rpc doUpgrade(.hadoop.hdfs.qjournal.DoUpgradeRequestProto) returns (.hadoop.hdfs.qjournal.DoUpgradeResponseProto);</code>
25582         */
25583        public abstract void doUpgrade(
25584            com.google.protobuf.RpcController controller,
25585            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeRequestProto request,
25586            com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeResponseProto> done);
25587    
25588        /**
25589         * <code>rpc doFinalize(.hadoop.hdfs.qjournal.DoFinalizeRequestProto) returns (.hadoop.hdfs.qjournal.DoFinalizeResponseProto);</code>
25590         */
25591        public abstract void doFinalize(
25592            com.google.protobuf.RpcController controller,
25593            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeRequestProto request,
25594            com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeResponseProto> done);
25595    
25596        /**
25597         * <code>rpc canRollBack(.hadoop.hdfs.qjournal.CanRollBackRequestProto) returns (.hadoop.hdfs.qjournal.CanRollBackResponseProto);</code>
25598         */
25599        public abstract void canRollBack(
25600            com.google.protobuf.RpcController controller,
25601            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackRequestProto request,
25602            com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackResponseProto> done);
25603    
25604        /**
25605         * <code>rpc doRollback(.hadoop.hdfs.qjournal.DoRollbackRequestProto) returns (.hadoop.hdfs.qjournal.DoRollbackResponseProto);</code>
25606         */
25607        public abstract void doRollback(
25608            com.google.protobuf.RpcController controller,
25609            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackRequestProto request,
25610            com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackResponseProto> done);
25611    
25612        /**
25613         * <code>rpc getJournalState(.hadoop.hdfs.qjournal.GetJournalStateRequestProto) returns (.hadoop.hdfs.qjournal.GetJournalStateResponseProto);</code>
25614         */
25615        public abstract void getJournalState(
25616            com.google.protobuf.RpcController controller,
25617            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto request,
25618            com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto> done);
25619    
25620        /**
25621         * <code>rpc newEpoch(.hadoop.hdfs.qjournal.NewEpochRequestProto) returns (.hadoop.hdfs.qjournal.NewEpochResponseProto);</code>
25622         */
25623        public abstract void newEpoch(
25624            com.google.protobuf.RpcController controller,
25625            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto request,
25626            com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto> done);
25627    
25628        /**
25629         * <code>rpc format(.hadoop.hdfs.qjournal.FormatRequestProto) returns (.hadoop.hdfs.qjournal.FormatResponseProto);</code>
25630         */
25631        public abstract void format(
25632            com.google.protobuf.RpcController controller,
25633            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto request,
25634            com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto> done);
25635    
25636        /**
25637         * <code>rpc journal(.hadoop.hdfs.qjournal.JournalRequestProto) returns (.hadoop.hdfs.qjournal.JournalResponseProto);</code>
25638         */
25639        public abstract void journal(
25640            com.google.protobuf.RpcController controller,
25641            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto request,
25642            com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto> done);
25643    
25644        /**
25645         * <code>rpc heartbeat(.hadoop.hdfs.qjournal.HeartbeatRequestProto) returns (.hadoop.hdfs.qjournal.HeartbeatResponseProto);</code>
25646         */
25647        public abstract void heartbeat(
25648            com.google.protobuf.RpcController controller,
25649            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto request,
25650            com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto> done);
25651    
25652        /**
25653         * <code>rpc startLogSegment(.hadoop.hdfs.qjournal.StartLogSegmentRequestProto) returns (.hadoop.hdfs.qjournal.StartLogSegmentResponseProto);</code>
25654         */
25655        public abstract void startLogSegment(
25656            com.google.protobuf.RpcController controller,
25657            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto request,
25658            com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto> done);
25659    
25660        /**
25661         * <code>rpc finalizeLogSegment(.hadoop.hdfs.qjournal.FinalizeLogSegmentRequestProto) returns (.hadoop.hdfs.qjournal.FinalizeLogSegmentResponseProto);</code>
25662         */
25663        public abstract void finalizeLogSegment(
25664            com.google.protobuf.RpcController controller,
25665            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto request,
25666            com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto> done);
25667    
25668        /**
25669         * <code>rpc purgeLogs(.hadoop.hdfs.qjournal.PurgeLogsRequestProto) returns (.hadoop.hdfs.qjournal.PurgeLogsResponseProto);</code>
25670         */
25671        public abstract void purgeLogs(
25672            com.google.protobuf.RpcController controller,
25673            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto request,
25674            com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto> done);
25675    
25676        /**
25677         * <code>rpc getEditLogManifest(.hadoop.hdfs.qjournal.GetEditLogManifestRequestProto) returns (.hadoop.hdfs.qjournal.GetEditLogManifestResponseProto);</code>
25678         */
25679        public abstract void getEditLogManifest(
25680            com.google.protobuf.RpcController controller,
25681            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto request,
25682            com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto> done);
25683    
25684        /**
25685         * <code>rpc prepareRecovery(.hadoop.hdfs.qjournal.PrepareRecoveryRequestProto) returns (.hadoop.hdfs.qjournal.PrepareRecoveryResponseProto);</code>
25686         */
25687        public abstract void prepareRecovery(
25688            com.google.protobuf.RpcController controller,
25689            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto request,
25690            com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto> done);
25691    
25692        /**
25693         * <code>rpc acceptRecovery(.hadoop.hdfs.qjournal.AcceptRecoveryRequestProto) returns (.hadoop.hdfs.qjournal.AcceptRecoveryResponseProto);</code>
25694         */
25695        public abstract void acceptRecovery(
25696            com.google.protobuf.RpcController controller,
25697            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto request,
25698            com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto> done);
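
    // Editorial note (illustrative, not generated output): the abstract
    // methods above are the server-side face of the QJournalProtocol service.
    // An implementation overrides each rpc and must invoke done.run(...)
    // exactly once with the response, or report failure through the
    // RpcController. A minimal sketch using only types from this file
    // (the class name is illustrative):
    //
    //   class JournalNodeService
    //       extends QJournalProtocolProtos.QJournalProtocolService {
    //     @java.lang.Override
    //     public void heartbeat(
    //         com.google.protobuf.RpcController controller,
    //         HeartbeatRequestProto request,
    //         com.google.protobuf.RpcCallback<HeartbeatResponseProto> done) {
    //       // HeartbeatResponseProto is a void-style (fieldless) response in
    //       // QJournalProtocol.proto, so the default instance is a complete
    //       // reply.
    //       done.run(HeartbeatResponseProto.getDefaultInstance());
    //     }
    //     // ... the remaining 18 rpcs must be overridden the same way ...
    //   }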
25699    
25700        public static final
25701            com.google.protobuf.Descriptors.ServiceDescriptor
25702            getDescriptor() {
25703          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.getDescriptor().getServices().get(0);
25704        }
25705        public final com.google.protobuf.Descriptors.ServiceDescriptor
25706            getDescriptorForType() {
25707          return getDescriptor();
25708        }
25709    
25710        public final void callMethod(
25711            com.google.protobuf.Descriptors.MethodDescriptor method,
25712            com.google.protobuf.RpcController controller,
25713            com.google.protobuf.Message request,
25714            com.google.protobuf.RpcCallback<
25715              com.google.protobuf.Message> done) {
25716          if (method.getService() != getDescriptor()) {
25717            throw new java.lang.IllegalArgumentException(
25718              "Service.callMethod() given method descriptor for wrong " +
25719              "service type.");
25720          }
25721          switch(method.getIndex()) {
25722            case 0:
25723              this.isFormatted(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto)request,
25724                com.google.protobuf.RpcUtil.<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto>specializeCallback(
25725                  done));
25726              return;
25727            case 1:
25728              this.discardSegments(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsRequestProto)request,
25729                com.google.protobuf.RpcUtil.<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsResponseProto>specializeCallback(
25730                  done));
25731              return;
25732            case 2:
25733              this.getJournalCTime(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeRequestProto)request,
25734                com.google.protobuf.RpcUtil.<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeResponseProto>specializeCallback(
25735                  done));
25736              return;
25737            case 3:
25738              this.doPreUpgrade(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeRequestProto)request,
25739                com.google.protobuf.RpcUtil.<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeResponseProto>specializeCallback(
25740                  done));
25741              return;
25742            case 4:
25743              this.doUpgrade(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeRequestProto)request,
25744                com.google.protobuf.RpcUtil.<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeResponseProto>specializeCallback(
25745                  done));
25746              return;
25747            case 5:
25748              this.doFinalize(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeRequestProto)request,
25749                com.google.protobuf.RpcUtil.<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeResponseProto>specializeCallback(
25750                  done));
25751              return;
25752            case 6:
25753              this.canRollBack(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackRequestProto)request,
25754                com.google.protobuf.RpcUtil.<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackResponseProto>specializeCallback(
25755                  done));
25756              return;
25757            case 7:
25758              this.doRollback(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackRequestProto)request,
25759                com.google.protobuf.RpcUtil.<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackResponseProto>specializeCallback(
25760                  done));
25761              return;
25762            case 8:
25763              this.getJournalState(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto)request,
25764                com.google.protobuf.RpcUtil.<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto>specializeCallback(
25765                  done));
25766              return;
25767            case 9:
25768              this.newEpoch(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto)request,
25769                com.google.protobuf.RpcUtil.<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto>specializeCallback(
25770                  done));
25771              return;
25772            case 10:
25773              this.format(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto)request,
25774                com.google.protobuf.RpcUtil.<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto>specializeCallback(
25775                  done));
25776              return;
25777            case 11:
25778              this.journal(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto)request,
25779                com.google.protobuf.RpcUtil.<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto>specializeCallback(
25780                  done));
25781              return;
25782            case 12:
25783              this.heartbeat(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto)request,
25784                com.google.protobuf.RpcUtil.<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto>specializeCallback(
25785                  done));
25786              return;
25787            case 13:
25788              this.startLogSegment(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto)request,
25789                com.google.protobuf.RpcUtil.<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto>specializeCallback(
25790                  done));
25791              return;
25792            case 14:
25793              this.finalizeLogSegment(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto)request,
25794                com.google.protobuf.RpcUtil.<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto>specializeCallback(
25795                  done));
25796              return;
25797            case 15:
25798              this.purgeLogs(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto)request,
25799                com.google.protobuf.RpcUtil.<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto>specializeCallback(
25800                  done));
25801              return;
25802            case 16:
25803              this.getEditLogManifest(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto)request,
25804                com.google.protobuf.RpcUtil.<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto>specializeCallback(
25805                  done));
25806              return;
25807            case 17:
25808              this.prepareRecovery(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto)request,
25809                com.google.protobuf.RpcUtil.<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto>specializeCallback(
25810                  done));
25811              return;
25812            case 18:
25813              this.acceptRecovery(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto)request,
25814                com.google.protobuf.RpcUtil.<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto>specializeCallback(
25815                  done));
25816              return;
25817            default:
25818              throw new java.lang.AssertionError("Can't get here.");
25819          }
25820        }
25821    
25822        public final com.google.protobuf.Message
25823            getRequestPrototype(
25824            com.google.protobuf.Descriptors.MethodDescriptor method) {
25825          if (method.getService() != getDescriptor()) {
25826            throw new java.lang.IllegalArgumentException(
25827              "Service.getRequestPrototype() given method " +
25828              "descriptor for wrong service type.");
25829          }
25830          switch(method.getIndex()) {
25831            case 0:
25832              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto.getDefaultInstance();
25833            case 1:
25834              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsRequestProto.getDefaultInstance();
25835            case 2:
25836              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeRequestProto.getDefaultInstance();
25837            case 3:
25838              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeRequestProto.getDefaultInstance();
25839            case 4:
25840              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeRequestProto.getDefaultInstance();
25841            case 5:
25842              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeRequestProto.getDefaultInstance();
25843            case 6:
25844              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackRequestProto.getDefaultInstance();
25845            case 7:
25846              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackRequestProto.getDefaultInstance();
25847            case 8:
25848              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto.getDefaultInstance();
25849            case 9:
25850              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto.getDefaultInstance();
25851            case 10:
25852              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto.getDefaultInstance();
25853            case 11:
25854              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto.getDefaultInstance();
25855            case 12:
25856              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto.getDefaultInstance();
25857            case 13:
25858              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto.getDefaultInstance();
25859            case 14:
25860              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto.getDefaultInstance();
25861            case 15:
25862              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto.getDefaultInstance();
25863            case 16:
25864              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto.getDefaultInstance();
25865            case 17:
25866              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto.getDefaultInstance();
25867            case 18:
25868              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto.getDefaultInstance();
25869            default:
25870              throw new java.lang.AssertionError("Can't get here.");
25871          }
25872        }
25873    
25874        public final com.google.protobuf.Message
25875            getResponsePrototype(
25876            com.google.protobuf.Descriptors.MethodDescriptor method) {
25877          if (method.getService() != getDescriptor()) {
25878            throw new java.lang.IllegalArgumentException(
25879              "Service.getResponsePrototype() given method " +
25880              "descriptor for wrong service type.");
25881          }
25882          switch(method.getIndex()) {
25883            case 0:
25884              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto.getDefaultInstance();
25885            case 1:
25886              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsResponseProto.getDefaultInstance();
25887            case 2:
25888              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeResponseProto.getDefaultInstance();
25889            case 3:
25890              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeResponseProto.getDefaultInstance();
25891            case 4:
25892              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeResponseProto.getDefaultInstance();
25893            case 5:
25894              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeResponseProto.getDefaultInstance();
25895            case 6:
25896              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackResponseProto.getDefaultInstance();
25897            case 7:
25898              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackResponseProto.getDefaultInstance();
25899            case 8:
25900              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto.getDefaultInstance();
25901            case 9:
25902              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto.getDefaultInstance();
25903            case 10:
25904              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto.getDefaultInstance();
25905            case 11:
25906              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto.getDefaultInstance();
25907            case 12:
25908              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto.getDefaultInstance();
25909            case 13:
25910              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto.getDefaultInstance();
25911            case 14:
25912              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto.getDefaultInstance();
25913            case 15:
25914              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto.getDefaultInstance();
25915            case 16:
25916              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto.getDefaultInstance();
25917            case 17:
25918              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto.getDefaultInstance();
25919            case 18:
25920              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto.getDefaultInstance();
25921            default:
25922              throw new java.lang.AssertionError("Can't get here.");
25923          }
25924        }
25925    
25926        public static Stub newStub(
25927            com.google.protobuf.RpcChannel channel) {
25928          return new Stub(channel);
25929        }
25930    
25931        public static final class Stub extends org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.QJournalProtocolService implements Interface {
25932          private Stub(com.google.protobuf.RpcChannel channel) {
25933            this.channel = channel;
25934          }
25935    
25936          private final com.google.protobuf.RpcChannel channel;
25937    
25938          public com.google.protobuf.RpcChannel getChannel() {
25939            return channel;
25940          }
25941    
25942          public  void isFormatted(
25943              com.google.protobuf.RpcController controller,
25944              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto request,
25945              com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto> done) {
25946            channel.callMethod(
25947              getDescriptor().getMethods().get(0),
25948              controller,
25949              request,
25950              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto.getDefaultInstance(),
25951              com.google.protobuf.RpcUtil.generalizeCallback(
25952                done,
25953                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto.class,
25954                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto.getDefaultInstance()));
25955          }
25956    
25957          public  void discardSegments(
25958              com.google.protobuf.RpcController controller,
25959              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsRequestProto request,
25960              com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsResponseProto> done) {
25961            channel.callMethod(
25962              getDescriptor().getMethods().get(1),
25963              controller,
25964              request,
25965              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsResponseProto.getDefaultInstance(),
25966              com.google.protobuf.RpcUtil.generalizeCallback(
25967                done,
25968                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsResponseProto.class,
25969                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsResponseProto.getDefaultInstance()));
25970          }
25971    
25972          public  void getJournalCTime(
25973              com.google.protobuf.RpcController controller,
25974              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeRequestProto request,
25975              com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeResponseProto> done) {
25976            channel.callMethod(
25977              getDescriptor().getMethods().get(2),
25978              controller,
25979              request,
25980              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeResponseProto.getDefaultInstance(),
25981              com.google.protobuf.RpcUtil.generalizeCallback(
25982                done,
25983                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeResponseProto.class,
25984                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeResponseProto.getDefaultInstance()));
25985          }
25986    
25987          public  void doPreUpgrade(
25988              com.google.protobuf.RpcController controller,
25989              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeRequestProto request,
25990              com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeResponseProto> done) {
25991            channel.callMethod(
25992              getDescriptor().getMethods().get(3),
25993              controller,
25994              request,
25995              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeResponseProto.getDefaultInstance(),
25996              com.google.protobuf.RpcUtil.generalizeCallback(
25997                done,
25998                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeResponseProto.class,
25999                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeResponseProto.getDefaultInstance()));
26000          }
26001    
26002          public  void doUpgrade(
26003              com.google.protobuf.RpcController controller,
26004              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeRequestProto request,
26005              com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeResponseProto> done) {
26006            channel.callMethod(
26007              getDescriptor().getMethods().get(4),
26008              controller,
26009              request,
26010              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeResponseProto.getDefaultInstance(),
26011              com.google.protobuf.RpcUtil.generalizeCallback(
26012                done,
26013                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeResponseProto.class,
26014                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeResponseProto.getDefaultInstance()));
26015          }
26016    
26017          public  void doFinalize(
26018              com.google.protobuf.RpcController controller,
26019              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeRequestProto request,
26020              com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeResponseProto> done) {
26021            channel.callMethod(
26022              getDescriptor().getMethods().get(5),
26023              controller,
26024              request,
26025              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeResponseProto.getDefaultInstance(),
26026              com.google.protobuf.RpcUtil.generalizeCallback(
26027                done,
26028                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeResponseProto.class,
26029                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeResponseProto.getDefaultInstance()));
26030          }
26031    
26032          public  void canRollBack(
26033              com.google.protobuf.RpcController controller,
26034              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackRequestProto request,
26035              com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackResponseProto> done) {
26036            channel.callMethod(
26037              getDescriptor().getMethods().get(6),
26038              controller,
26039              request,
26040              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackResponseProto.getDefaultInstance(),
26041              com.google.protobuf.RpcUtil.generalizeCallback(
26042                done,
26043                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackResponseProto.class,
26044                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackResponseProto.getDefaultInstance()));
26045          }
26046    
26047          public  void doRollback(
26048              com.google.protobuf.RpcController controller,
26049              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackRequestProto request,
26050              com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackResponseProto> done) {
26051            channel.callMethod(
26052              getDescriptor().getMethods().get(7),
26053              controller,
26054              request,
26055              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackResponseProto.getDefaultInstance(),
26056              com.google.protobuf.RpcUtil.generalizeCallback(
26057                done,
26058                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackResponseProto.class,
26059                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackResponseProto.getDefaultInstance()));
26060          }
26061    
26062          public  void getJournalState(
26063              com.google.protobuf.RpcController controller,
26064              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto request,
26065              com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto> done) {
26066            channel.callMethod(
26067              getDescriptor().getMethods().get(8),
26068              controller,
26069              request,
26070              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto.getDefaultInstance(),
26071              com.google.protobuf.RpcUtil.generalizeCallback(
26072                done,
26073                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto.class,
26074                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto.getDefaultInstance()));
26075          }
26076    
26077          public  void newEpoch(
26078              com.google.protobuf.RpcController controller,
26079              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto request,
26080              com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto> done) {
26081            channel.callMethod(
26082              getDescriptor().getMethods().get(9),
26083              controller,
26084              request,
26085              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto.getDefaultInstance(),
26086              com.google.protobuf.RpcUtil.generalizeCallback(
26087                done,
26088                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto.class,
26089                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto.getDefaultInstance()));
26090          }
26091    
26092          public  void format(
26093              com.google.protobuf.RpcController controller,
26094              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto request,
26095              com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto> done) {
26096            channel.callMethod(
26097              getDescriptor().getMethods().get(10),
26098              controller,
26099              request,
26100              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto.getDefaultInstance(),
26101              com.google.protobuf.RpcUtil.generalizeCallback(
26102                done,
26103                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto.class,
26104                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto.getDefaultInstance()));
26105          }
26106    
26107          public  void journal(
26108              com.google.protobuf.RpcController controller,
26109              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto request,
26110              com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto> done) {
26111            channel.callMethod(
26112              getDescriptor().getMethods().get(11),
26113              controller,
26114              request,
26115              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto.getDefaultInstance(),
26116              com.google.protobuf.RpcUtil.generalizeCallback(
26117                done,
26118                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto.class,
26119                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto.getDefaultInstance()));
26120          }
26121    
26122          public  void heartbeat(
26123              com.google.protobuf.RpcController controller,
26124              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto request,
26125              com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto> done) {
26126            channel.callMethod(
26127              getDescriptor().getMethods().get(12),
26128              controller,
26129              request,
26130              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto.getDefaultInstance(),
26131              com.google.protobuf.RpcUtil.generalizeCallback(
26132                done,
26133                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto.class,
26134                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto.getDefaultInstance()));
26135          }
26136    
26137          public  void startLogSegment(
26138              com.google.protobuf.RpcController controller,
26139              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto request,
26140              com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto> done) {
26141            channel.callMethod(
26142              getDescriptor().getMethods().get(13),
26143              controller,
26144              request,
26145              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto.getDefaultInstance(),
26146              com.google.protobuf.RpcUtil.generalizeCallback(
26147                done,
26148                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto.class,
26149                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto.getDefaultInstance()));
26150          }
26151    
26152          public  void finalizeLogSegment(
26153              com.google.protobuf.RpcController controller,
26154              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto request,
26155              com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto> done) {
26156            channel.callMethod(
26157              getDescriptor().getMethods().get(14),
26158              controller,
26159              request,
26160              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto.getDefaultInstance(),
26161              com.google.protobuf.RpcUtil.generalizeCallback(
26162                done,
26163                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto.class,
26164                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto.getDefaultInstance()));
26165          }
26166    
26167          public  void purgeLogs(
26168              com.google.protobuf.RpcController controller,
26169              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto request,
26170              com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto> done) {
26171            channel.callMethod(
26172              getDescriptor().getMethods().get(15),
26173              controller,
26174              request,
26175              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto.getDefaultInstance(),
26176              com.google.protobuf.RpcUtil.generalizeCallback(
26177                done,
26178                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto.class,
26179                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto.getDefaultInstance()));
26180          }
26181    
26182          public  void getEditLogManifest(
26183              com.google.protobuf.RpcController controller,
26184              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto request,
26185              com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto> done) {
26186            channel.callMethod(
26187              getDescriptor().getMethods().get(16),
26188              controller,
26189              request,
26190              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto.getDefaultInstance(),
26191              com.google.protobuf.RpcUtil.generalizeCallback(
26192                done,
26193                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto.class,
26194                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto.getDefaultInstance()));
26195          }
26196    
26197          public  void prepareRecovery(
26198              com.google.protobuf.RpcController controller,
26199              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto request,
26200              com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto> done) {
26201            channel.callMethod(
26202              getDescriptor().getMethods().get(17),
26203              controller,
26204              request,
26205              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto.getDefaultInstance(),
26206              com.google.protobuf.RpcUtil.generalizeCallback(
26207                done,
26208                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto.class,
26209                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto.getDefaultInstance()));
26210          }
26211    
26212          public  void acceptRecovery(
26213              com.google.protobuf.RpcController controller,
26214              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto request,
26215              com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto> done) {
26216            channel.callMethod(
26217              getDescriptor().getMethods().get(18),
26218              controller,
26219              request,
26220              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto.getDefaultInstance(),
26221              com.google.protobuf.RpcUtil.generalizeCallback(
26222                done,
26223                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto.class,
26224                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto.getDefaultInstance()));
26225          }
26226        }
26227    
26228        public static BlockingInterface newBlockingStub(
26229            com.google.protobuf.BlockingRpcChannel channel) {
26230          return new BlockingStub(channel);
26231        }
26232    
26233        public interface BlockingInterface {
26234          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto isFormatted(
26235              com.google.protobuf.RpcController controller,
26236              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto request)
26237              throws com.google.protobuf.ServiceException;
26238    
26239          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsResponseProto discardSegments(
26240              com.google.protobuf.RpcController controller,
26241              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsRequestProto request)
26242              throws com.google.protobuf.ServiceException;
26243    
26244          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeResponseProto getJournalCTime(
26245              com.google.protobuf.RpcController controller,
26246              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeRequestProto request)
26247              throws com.google.protobuf.ServiceException;
26248    
26249          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeResponseProto doPreUpgrade(
26250              com.google.protobuf.RpcController controller,
26251              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeRequestProto request)
26252              throws com.google.protobuf.ServiceException;
26253    
26254          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeResponseProto doUpgrade(
26255              com.google.protobuf.RpcController controller,
26256              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeRequestProto request)
26257              throws com.google.protobuf.ServiceException;
26258    
26259          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeResponseProto doFinalize(
26260              com.google.protobuf.RpcController controller,
26261              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeRequestProto request)
26262              throws com.google.protobuf.ServiceException;
26263    
26264          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackResponseProto canRollBack(
26265              com.google.protobuf.RpcController controller,
26266              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackRequestProto request)
26267              throws com.google.protobuf.ServiceException;
26268    
26269          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackResponseProto doRollback(
26270              com.google.protobuf.RpcController controller,
26271              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackRequestProto request)
26272              throws com.google.protobuf.ServiceException;
26273    
26274          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto getJournalState(
26275              com.google.protobuf.RpcController controller,
26276              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto request)
26277              throws com.google.protobuf.ServiceException;
26278    
26279          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto newEpoch(
26280              com.google.protobuf.RpcController controller,
26281              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto request)
26282              throws com.google.protobuf.ServiceException;
26283    
26284          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto format(
26285              com.google.protobuf.RpcController controller,
26286              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto request)
26287              throws com.google.protobuf.ServiceException;
26288    
26289          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto journal(
26290              com.google.protobuf.RpcController controller,
26291              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto request)
26292              throws com.google.protobuf.ServiceException;
26293    
26294          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto heartbeat(
26295              com.google.protobuf.RpcController controller,
26296              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto request)
26297              throws com.google.protobuf.ServiceException;
26298    
26299          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto startLogSegment(
26300              com.google.protobuf.RpcController controller,
26301              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto request)
26302              throws com.google.protobuf.ServiceException;
26303    
26304          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto finalizeLogSegment(
26305              com.google.protobuf.RpcController controller,
26306              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto request)
26307              throws com.google.protobuf.ServiceException;
26308    
26309          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto purgeLogs(
26310              com.google.protobuf.RpcController controller,
26311              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto request)
26312              throws com.google.protobuf.ServiceException;
26313    
26314          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto getEditLogManifest(
26315              com.google.protobuf.RpcController controller,
26316              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto request)
26317              throws com.google.protobuf.ServiceException;
26318    
26319          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto prepareRecovery(
26320              com.google.protobuf.RpcController controller,
26321              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto request)
26322              throws com.google.protobuf.ServiceException;
26323    
26324          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto acceptRecovery(
26325              com.google.protobuf.RpcController controller,
26326              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto request)
26327              throws com.google.protobuf.ServiceException;
26328        }
26329    
26330        private static final class BlockingStub implements BlockingInterface {
26331          private BlockingStub(com.google.protobuf.BlockingRpcChannel channel) {
26332            this.channel = channel;
26333          }
26334    
26335          private final com.google.protobuf.BlockingRpcChannel channel;
26336    
26337          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto isFormatted(
26338              com.google.protobuf.RpcController controller,
26339              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto request)
26340              throws com.google.protobuf.ServiceException {
26341            return (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto) channel.callBlockingMethod(
26342              getDescriptor().getMethods().get(0),
26343              controller,
26344              request,
26345              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto.getDefaultInstance());
26346          }
26347    
26348    
26349          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsResponseProto discardSegments(
26350              com.google.protobuf.RpcController controller,
26351              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsRequestProto request)
26352              throws com.google.protobuf.ServiceException {
26353            return (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsResponseProto) channel.callBlockingMethod(
26354              getDescriptor().getMethods().get(1),
26355              controller,
26356              request,
26357              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DiscardSegmentsResponseProto.getDefaultInstance());
26358          }
26359    
26360    
26361          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeResponseProto getJournalCTime(
26362              com.google.protobuf.RpcController controller,
26363              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeRequestProto request)
26364              throws com.google.protobuf.ServiceException {
26365            return (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeResponseProto) channel.callBlockingMethod(
26366              getDescriptor().getMethods().get(2),
26367              controller,
26368              request,
26369              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalCTimeResponseProto.getDefaultInstance());
26370          }
26371    
26372    
26373          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeResponseProto doPreUpgrade(
26374              com.google.protobuf.RpcController controller,
26375              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeRequestProto request)
26376              throws com.google.protobuf.ServiceException {
26377            return (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeResponseProto) channel.callBlockingMethod(
26378              getDescriptor().getMethods().get(3),
26379              controller,
26380              request,
26381              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoPreUpgradeResponseProto.getDefaultInstance());
26382          }
26383    
26384    
26385          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeResponseProto doUpgrade(
26386              com.google.protobuf.RpcController controller,
26387              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeRequestProto request)
26388              throws com.google.protobuf.ServiceException {
26389            return (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeResponseProto) channel.callBlockingMethod(
26390              getDescriptor().getMethods().get(4),
26391              controller,
26392              request,
26393              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoUpgradeResponseProto.getDefaultInstance());
26394          }
26395    
26396    
26397          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeResponseProto doFinalize(
26398              com.google.protobuf.RpcController controller,
26399              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeRequestProto request)
26400              throws com.google.protobuf.ServiceException {
26401            return (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeResponseProto) channel.callBlockingMethod(
26402              getDescriptor().getMethods().get(5),
26403              controller,
26404              request,
26405              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoFinalizeResponseProto.getDefaultInstance());
26406          }
26407    
26408    
26409          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackResponseProto canRollBack(
26410              com.google.protobuf.RpcController controller,
26411              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackRequestProto request)
26412              throws com.google.protobuf.ServiceException {
26413            return (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackResponseProto) channel.callBlockingMethod(
26414              getDescriptor().getMethods().get(6),
26415              controller,
26416              request,
26417              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.CanRollBackResponseProto.getDefaultInstance());
26418          }
26419    
26420    
26421          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackResponseProto doRollback(
26422              com.google.protobuf.RpcController controller,
26423              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackRequestProto request)
26424              throws com.google.protobuf.ServiceException {
26425            return (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackResponseProto) channel.callBlockingMethod(
26426              getDescriptor().getMethods().get(7),
26427              controller,
26428              request,
26429              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.DoRollbackResponseProto.getDefaultInstance());
26430          }
26431    
26432    
26433          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto getJournalState(
26434              com.google.protobuf.RpcController controller,
26435              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto request)
26436              throws com.google.protobuf.ServiceException {
26437            return (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto) channel.callBlockingMethod(
26438              getDescriptor().getMethods().get(8),
26439              controller,
26440              request,
26441              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto.getDefaultInstance());
26442          }
26443    
26444    
26445          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto newEpoch(
26446              com.google.protobuf.RpcController controller,
26447              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto request)
26448              throws com.google.protobuf.ServiceException {
26449            return (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto) channel.callBlockingMethod(
26450              getDescriptor().getMethods().get(9),
26451              controller,
26452              request,
26453              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto.getDefaultInstance());
26454          }
26455    
26456    
26457          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto format(
26458              com.google.protobuf.RpcController controller,
26459              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto request)
26460              throws com.google.protobuf.ServiceException {
26461            return (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto) channel.callBlockingMethod(
26462              getDescriptor().getMethods().get(10),
26463              controller,
26464              request,
26465              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto.getDefaultInstance());
26466          }
26467    
26468    
26469          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto journal(
26470              com.google.protobuf.RpcController controller,
26471              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto request)
26472              throws com.google.protobuf.ServiceException {
26473            return (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto) channel.callBlockingMethod(
26474              getDescriptor().getMethods().get(11),
26475              controller,
26476              request,
26477              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto.getDefaultInstance());
26478          }
26479    
26480    
26481          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto heartbeat(
26482              com.google.protobuf.RpcController controller,
26483              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto request)
26484              throws com.google.protobuf.ServiceException {
26485            return (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto) channel.callBlockingMethod(
26486              getDescriptor().getMethods().get(12),
26487              controller,
26488              request,
26489              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto.getDefaultInstance());
26490          }
26491    
26492    
26493          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto startLogSegment(
26494              com.google.protobuf.RpcController controller,
26495              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto request)
26496              throws com.google.protobuf.ServiceException {
26497            return (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto) channel.callBlockingMethod(
26498              getDescriptor().getMethods().get(13),
26499              controller,
26500              request,
26501              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto.getDefaultInstance());
26502          }
26503    
26504    
26505          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto finalizeLogSegment(
26506              com.google.protobuf.RpcController controller,
26507              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto request)
26508              throws com.google.protobuf.ServiceException {
26509            return (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto) channel.callBlockingMethod(
26510              getDescriptor().getMethods().get(14),
26511              controller,
26512              request,
26513              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto.getDefaultInstance());
26514          }
26515    
26516    
26517          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto purgeLogs(
26518              com.google.protobuf.RpcController controller,
26519              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto request)
26520              throws com.google.protobuf.ServiceException {
26521            return (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto) channel.callBlockingMethod(
26522              getDescriptor().getMethods().get(15),
26523              controller,
26524              request,
26525              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto.getDefaultInstance());
26526          }
26527    
26528    
26529          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto getEditLogManifest(
26530              com.google.protobuf.RpcController controller,
26531              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto request)
26532              throws com.google.protobuf.ServiceException {
26533            return (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto) channel.callBlockingMethod(
26534              getDescriptor().getMethods().get(16),
26535              controller,
26536              request,
26537              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto.getDefaultInstance());
26538          }
26539    
26540    
26541          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto prepareRecovery(
26542              com.google.protobuf.RpcController controller,
26543              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto request)
26544              throws com.google.protobuf.ServiceException {
26545            return (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto) channel.callBlockingMethod(
26546              getDescriptor().getMethods().get(17),
26547              controller,
26548              request,
26549              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto.getDefaultInstance());
26550          }
26551    
26552    
26553          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto acceptRecovery(
26554              com.google.protobuf.RpcController controller,
26555              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto request)
26556              throws com.google.protobuf.ServiceException {
26557            return (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto) channel.callBlockingMethod(
26558              getDescriptor().getMethods().get(18),
26559              controller,
26560              request,
26561              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto.getDefaultInstance());
26562          }
26563    
26564        }
26565    
26566        // @@protoc_insertion_point(class_scope:hadoop.hdfs.qjournal.QJournalProtocolService)
26567      }
26568    
26569      private static com.google.protobuf.Descriptors.Descriptor
26570        internal_static_hadoop_hdfs_qjournal_JournalIdProto_descriptor;
26571      private static
26572        com.google.protobuf.GeneratedMessage.FieldAccessorTable
26573          internal_static_hadoop_hdfs_qjournal_JournalIdProto_fieldAccessorTable;
26574      private static com.google.protobuf.Descriptors.Descriptor
26575        internal_static_hadoop_hdfs_qjournal_RequestInfoProto_descriptor;
26576      private static
26577        com.google.protobuf.GeneratedMessage.FieldAccessorTable
26578          internal_static_hadoop_hdfs_qjournal_RequestInfoProto_fieldAccessorTable;
26579      private static com.google.protobuf.Descriptors.Descriptor
26580        internal_static_hadoop_hdfs_qjournal_SegmentStateProto_descriptor;
26581      private static
26582        com.google.protobuf.GeneratedMessage.FieldAccessorTable
26583          internal_static_hadoop_hdfs_qjournal_SegmentStateProto_fieldAccessorTable;
26584      private static com.google.protobuf.Descriptors.Descriptor
26585        internal_static_hadoop_hdfs_qjournal_PersistedRecoveryPaxosData_descriptor;
26586      private static
26587        com.google.protobuf.GeneratedMessage.FieldAccessorTable
26588          internal_static_hadoop_hdfs_qjournal_PersistedRecoveryPaxosData_fieldAccessorTable;
26589      private static com.google.protobuf.Descriptors.Descriptor
26590        internal_static_hadoop_hdfs_qjournal_JournalRequestProto_descriptor;
26591      private static
26592        com.google.protobuf.GeneratedMessage.FieldAccessorTable
26593          internal_static_hadoop_hdfs_qjournal_JournalRequestProto_fieldAccessorTable;
26594      private static com.google.protobuf.Descriptors.Descriptor
26595        internal_static_hadoop_hdfs_qjournal_JournalResponseProto_descriptor;
26596      private static
26597        com.google.protobuf.GeneratedMessage.FieldAccessorTable
26598          internal_static_hadoop_hdfs_qjournal_JournalResponseProto_fieldAccessorTable;
26599      private static com.google.protobuf.Descriptors.Descriptor
26600        internal_static_hadoop_hdfs_qjournal_HeartbeatRequestProto_descriptor;
26601      private static
26602        com.google.protobuf.GeneratedMessage.FieldAccessorTable
26603          internal_static_hadoop_hdfs_qjournal_HeartbeatRequestProto_fieldAccessorTable;
26604      private static com.google.protobuf.Descriptors.Descriptor
26605        internal_static_hadoop_hdfs_qjournal_HeartbeatResponseProto_descriptor;
26606      private static
26607        com.google.protobuf.GeneratedMessage.FieldAccessorTable
26608          internal_static_hadoop_hdfs_qjournal_HeartbeatResponseProto_fieldAccessorTable;
26609      private static com.google.protobuf.Descriptors.Descriptor
26610        internal_static_hadoop_hdfs_qjournal_StartLogSegmentRequestProto_descriptor;
26611      private static
26612        com.google.protobuf.GeneratedMessage.FieldAccessorTable
26613          internal_static_hadoop_hdfs_qjournal_StartLogSegmentRequestProto_fieldAccessorTable;
26614      private static com.google.protobuf.Descriptors.Descriptor
26615        internal_static_hadoop_hdfs_qjournal_StartLogSegmentResponseProto_descriptor;
26616      private static
26617        com.google.protobuf.GeneratedMessage.FieldAccessorTable
26618          internal_static_hadoop_hdfs_qjournal_StartLogSegmentResponseProto_fieldAccessorTable;
26619      private static com.google.protobuf.Descriptors.Descriptor
26620        internal_static_hadoop_hdfs_qjournal_FinalizeLogSegmentRequestProto_descriptor;
26621      private static
26622        com.google.protobuf.GeneratedMessage.FieldAccessorTable
26623          internal_static_hadoop_hdfs_qjournal_FinalizeLogSegmentRequestProto_fieldAccessorTable;
26624      private static com.google.protobuf.Descriptors.Descriptor
26625        internal_static_hadoop_hdfs_qjournal_FinalizeLogSegmentResponseProto_descriptor;
26626      private static
26627        com.google.protobuf.GeneratedMessage.FieldAccessorTable
26628          internal_static_hadoop_hdfs_qjournal_FinalizeLogSegmentResponseProto_fieldAccessorTable;
26629      private static com.google.protobuf.Descriptors.Descriptor
26630        internal_static_hadoop_hdfs_qjournal_PurgeLogsRequestProto_descriptor;
26631      private static
26632        com.google.protobuf.GeneratedMessage.FieldAccessorTable
26633          internal_static_hadoop_hdfs_qjournal_PurgeLogsRequestProto_fieldAccessorTable;
26634      private static com.google.protobuf.Descriptors.Descriptor
26635        internal_static_hadoop_hdfs_qjournal_PurgeLogsResponseProto_descriptor;
26636      private static
26637        com.google.protobuf.GeneratedMessage.FieldAccessorTable
26638          internal_static_hadoop_hdfs_qjournal_PurgeLogsResponseProto_fieldAccessorTable;
26639      private static com.google.protobuf.Descriptors.Descriptor
26640        internal_static_hadoop_hdfs_qjournal_IsFormattedRequestProto_descriptor;
26641      private static
26642        com.google.protobuf.GeneratedMessage.FieldAccessorTable
26643          internal_static_hadoop_hdfs_qjournal_IsFormattedRequestProto_fieldAccessorTable;
26644      private static com.google.protobuf.Descriptors.Descriptor
26645        internal_static_hadoop_hdfs_qjournal_IsFormattedResponseProto_descriptor;
26646      private static
26647        com.google.protobuf.GeneratedMessage.FieldAccessorTable
26648          internal_static_hadoop_hdfs_qjournal_IsFormattedResponseProto_fieldAccessorTable;
26649      private static com.google.protobuf.Descriptors.Descriptor
26650        internal_static_hadoop_hdfs_qjournal_DiscardSegmentsRequestProto_descriptor;
26651      private static
26652        com.google.protobuf.GeneratedMessage.FieldAccessorTable
26653          internal_static_hadoop_hdfs_qjournal_DiscardSegmentsRequestProto_fieldAccessorTable;
26654      private static com.google.protobuf.Descriptors.Descriptor
26655        internal_static_hadoop_hdfs_qjournal_DiscardSegmentsResponseProto_descriptor;
26656      private static
26657        com.google.protobuf.GeneratedMessage.FieldAccessorTable
26658          internal_static_hadoop_hdfs_qjournal_DiscardSegmentsResponseProto_fieldAccessorTable;
26659      private static com.google.protobuf.Descriptors.Descriptor
26660        internal_static_hadoop_hdfs_qjournal_GetJournalCTimeRequestProto_descriptor;
26661      private static
26662        com.google.protobuf.GeneratedMessage.FieldAccessorTable
26663          internal_static_hadoop_hdfs_qjournal_GetJournalCTimeRequestProto_fieldAccessorTable;
26664      private static com.google.protobuf.Descriptors.Descriptor
26665        internal_static_hadoop_hdfs_qjournal_GetJournalCTimeResponseProto_descriptor;
26666      private static
26667        com.google.protobuf.GeneratedMessage.FieldAccessorTable
26668          internal_static_hadoop_hdfs_qjournal_GetJournalCTimeResponseProto_fieldAccessorTable;
26669      private static com.google.protobuf.Descriptors.Descriptor
26670        internal_static_hadoop_hdfs_qjournal_DoPreUpgradeRequestProto_descriptor;
26671      private static
26672        com.google.protobuf.GeneratedMessage.FieldAccessorTable
26673          internal_static_hadoop_hdfs_qjournal_DoPreUpgradeRequestProto_fieldAccessorTable;
26674      private static com.google.protobuf.Descriptors.Descriptor
26675        internal_static_hadoop_hdfs_qjournal_DoPreUpgradeResponseProto_descriptor;
26676      private static
26677        com.google.protobuf.GeneratedMessage.FieldAccessorTable
26678          internal_static_hadoop_hdfs_qjournal_DoPreUpgradeResponseProto_fieldAccessorTable;
26679      private static com.google.protobuf.Descriptors.Descriptor
26680        internal_static_hadoop_hdfs_qjournal_DoUpgradeRequestProto_descriptor;
26681      private static
26682        com.google.protobuf.GeneratedMessage.FieldAccessorTable
26683          internal_static_hadoop_hdfs_qjournal_DoUpgradeRequestProto_fieldAccessorTable;
26684      private static com.google.protobuf.Descriptors.Descriptor
26685        internal_static_hadoop_hdfs_qjournal_DoUpgradeResponseProto_descriptor;
26686      private static
26687        com.google.protobuf.GeneratedMessage.FieldAccessorTable
26688          internal_static_hadoop_hdfs_qjournal_DoUpgradeResponseProto_fieldAccessorTable;
26689      private static com.google.protobuf.Descriptors.Descriptor
26690        internal_static_hadoop_hdfs_qjournal_DoFinalizeRequestProto_descriptor;
26691      private static
26692        com.google.protobuf.GeneratedMessage.FieldAccessorTable
26693          internal_static_hadoop_hdfs_qjournal_DoFinalizeRequestProto_fieldAccessorTable;
26694      private static com.google.protobuf.Descriptors.Descriptor
26695        internal_static_hadoop_hdfs_qjournal_DoFinalizeResponseProto_descriptor;
26696      private static
26697        com.google.protobuf.GeneratedMessage.FieldAccessorTable
26698          internal_static_hadoop_hdfs_qjournal_DoFinalizeResponseProto_fieldAccessorTable;
26699      private static com.google.protobuf.Descriptors.Descriptor
26700        internal_static_hadoop_hdfs_qjournal_CanRollBackRequestProto_descriptor;
26701      private static
26702        com.google.protobuf.GeneratedMessage.FieldAccessorTable
26703          internal_static_hadoop_hdfs_qjournal_CanRollBackRequestProto_fieldAccessorTable;
26704      private static com.google.protobuf.Descriptors.Descriptor
26705        internal_static_hadoop_hdfs_qjournal_CanRollBackResponseProto_descriptor;
26706      private static
26707        com.google.protobuf.GeneratedMessage.FieldAccessorTable
26708          internal_static_hadoop_hdfs_qjournal_CanRollBackResponseProto_fieldAccessorTable;
26709      private static com.google.protobuf.Descriptors.Descriptor
26710        internal_static_hadoop_hdfs_qjournal_DoRollbackRequestProto_descriptor;
26711      private static
26712        com.google.protobuf.GeneratedMessage.FieldAccessorTable
26713          internal_static_hadoop_hdfs_qjournal_DoRollbackRequestProto_fieldAccessorTable;
26714      private static com.google.protobuf.Descriptors.Descriptor
26715        internal_static_hadoop_hdfs_qjournal_DoRollbackResponseProto_descriptor;
26716      private static
26717        com.google.protobuf.GeneratedMessage.FieldAccessorTable
26718          internal_static_hadoop_hdfs_qjournal_DoRollbackResponseProto_fieldAccessorTable;
26719      private static com.google.protobuf.Descriptors.Descriptor
26720        internal_static_hadoop_hdfs_qjournal_GetJournalStateRequestProto_descriptor;
26721      private static
26722        com.google.protobuf.GeneratedMessage.FieldAccessorTable
26723          internal_static_hadoop_hdfs_qjournal_GetJournalStateRequestProto_fieldAccessorTable;
26724      private static com.google.protobuf.Descriptors.Descriptor
26725        internal_static_hadoop_hdfs_qjournal_GetJournalStateResponseProto_descriptor;
26726      private static
26727        com.google.protobuf.GeneratedMessage.FieldAccessorTable
26728          internal_static_hadoop_hdfs_qjournal_GetJournalStateResponseProto_fieldAccessorTable;
26729      private static com.google.protobuf.Descriptors.Descriptor
26730        internal_static_hadoop_hdfs_qjournal_FormatRequestProto_descriptor;
26731      private static
26732        com.google.protobuf.GeneratedMessage.FieldAccessorTable
26733          internal_static_hadoop_hdfs_qjournal_FormatRequestProto_fieldAccessorTable;
26734      private static com.google.protobuf.Descriptors.Descriptor
26735        internal_static_hadoop_hdfs_qjournal_FormatResponseProto_descriptor;
26736      private static
26737        com.google.protobuf.GeneratedMessage.FieldAccessorTable
26738          internal_static_hadoop_hdfs_qjournal_FormatResponseProto_fieldAccessorTable;
26739      private static com.google.protobuf.Descriptors.Descriptor
26740        internal_static_hadoop_hdfs_qjournal_NewEpochRequestProto_descriptor;
26741      private static
26742        com.google.protobuf.GeneratedMessage.FieldAccessorTable
26743          internal_static_hadoop_hdfs_qjournal_NewEpochRequestProto_fieldAccessorTable;
26744      private static com.google.protobuf.Descriptors.Descriptor
26745        internal_static_hadoop_hdfs_qjournal_NewEpochResponseProto_descriptor;
26746      private static
26747        com.google.protobuf.GeneratedMessage.FieldAccessorTable
26748          internal_static_hadoop_hdfs_qjournal_NewEpochResponseProto_fieldAccessorTable;
26749      private static com.google.protobuf.Descriptors.Descriptor
26750        internal_static_hadoop_hdfs_qjournal_GetEditLogManifestRequestProto_descriptor;
26751      private static
26752        com.google.protobuf.GeneratedMessage.FieldAccessorTable
26753          internal_static_hadoop_hdfs_qjournal_GetEditLogManifestRequestProto_fieldAccessorTable;
26754      private static com.google.protobuf.Descriptors.Descriptor
26755        internal_static_hadoop_hdfs_qjournal_GetEditLogManifestResponseProto_descriptor;
26756      private static
26757        com.google.protobuf.GeneratedMessage.FieldAccessorTable
26758          internal_static_hadoop_hdfs_qjournal_GetEditLogManifestResponseProto_fieldAccessorTable;
26759      private static com.google.protobuf.Descriptors.Descriptor
26760        internal_static_hadoop_hdfs_qjournal_PrepareRecoveryRequestProto_descriptor;
26761      private static
26762        com.google.protobuf.GeneratedMessage.FieldAccessorTable
26763          internal_static_hadoop_hdfs_qjournal_PrepareRecoveryRequestProto_fieldAccessorTable;
26764      private static com.google.protobuf.Descriptors.Descriptor
26765        internal_static_hadoop_hdfs_qjournal_PrepareRecoveryResponseProto_descriptor;
26766      private static
26767        com.google.protobuf.GeneratedMessage.FieldAccessorTable
26768          internal_static_hadoop_hdfs_qjournal_PrepareRecoveryResponseProto_fieldAccessorTable;
26769      private static com.google.protobuf.Descriptors.Descriptor
26770        internal_static_hadoop_hdfs_qjournal_AcceptRecoveryRequestProto_descriptor;
26771      private static
26772        com.google.protobuf.GeneratedMessage.FieldAccessorTable
26773          internal_static_hadoop_hdfs_qjournal_AcceptRecoveryRequestProto_fieldAccessorTable;
26774      private static com.google.protobuf.Descriptors.Descriptor
26775        internal_static_hadoop_hdfs_qjournal_AcceptRecoveryResponseProto_descriptor;
26776      private static
26777        com.google.protobuf.GeneratedMessage.FieldAccessorTable
26778          internal_static_hadoop_hdfs_qjournal_AcceptRecoveryResponseProto_fieldAccessorTable;
26779    
26780      public static com.google.protobuf.Descriptors.FileDescriptor
26781          getDescriptor() {
26782        return descriptor;
26783      }
26784      private static com.google.protobuf.Descriptors.FileDescriptor
26785          descriptor;
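  // descriptorData below is the compiled QJournalProtocol.proto itself: a
  // serialized FileDescriptorProto embedded as octal-escaped string constants,
  // split into concatenated chunks (presumably to stay within class-file
  // limits on individual string literals). Do not edit these bytes by hand.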
26786      static {
26787        java.lang.String[] descriptorData = {
26788          "\n\026QJournalProtocol.proto\022\024hadoop.hdfs.qj" +
26789          "ournal\032\nhdfs.proto\"$\n\016JournalIdProto\022\022\n\n" +
26790          "identifier\030\001 \002(\t\"\212\001\n\020RequestInfoProto\0227\n" +
26791          "\tjournalId\030\001 \002(\0132$.hadoop.hdfs.qjournal." +
26792          "JournalIdProto\022\r\n\005epoch\030\002 \002(\004\022\027\n\017ipcSeri" +
26793          "alNumber\030\003 \002(\004\022\025\n\rcommittedTxId\030\004 \001(\004\"M\n" +
26794          "\021SegmentStateProto\022\021\n\tstartTxId\030\001 \002(\004\022\017\n" +
26795          "\007endTxId\030\002 \002(\004\022\024\n\014isInProgress\030\003 \002(\010\"t\n\032" +
26796          "PersistedRecoveryPaxosData\022=\n\014segmentSta" +
26797          "te\030\001 \002(\0132\'.hadoop.hdfs.qjournal.SegmentS",
26798          "tateProto\022\027\n\017acceptedInEpoch\030\002 \002(\004\"\232\001\n\023J" +
26799          "ournalRequestProto\0227\n\007reqInfo\030\001 \002(\0132&.ha" +
26800          "doop.hdfs.qjournal.RequestInfoProto\022\022\n\nf" +
26801          "irstTxnId\030\002 \002(\004\022\017\n\007numTxns\030\003 \002(\r\022\017\n\007reco" +
26802          "rds\030\004 \002(\014\022\024\n\014segmentTxnId\030\005 \002(\004\"\026\n\024Journ" +
26803          "alResponseProto\"P\n\025HeartbeatRequestProto" +
26804          "\0227\n\007reqInfo\030\001 \002(\0132&.hadoop.hdfs.qjournal" +
26805          ".RequestInfoProto\"\030\n\026HeartbeatResponsePr" +
26806          "oto\"{\n\033StartLogSegmentRequestProto\0227\n\007re" +
26807          "qInfo\030\001 \002(\0132&.hadoop.hdfs.qjournal.Reque",
26808          "stInfoProto\022\014\n\004txid\030\002 \002(\004\022\025\n\rlayoutVersi" +
26809          "on\030\003 \001(\021\"\036\n\034StartLogSegmentResponseProto" +
26810          "\"}\n\036FinalizeLogSegmentRequestProto\0227\n\007re" +
26811          "qInfo\030\001 \002(\0132&.hadoop.hdfs.qjournal.Reque" +
26812          "stInfoProto\022\021\n\tstartTxId\030\002 \002(\004\022\017\n\007endTxI" +
26813          "d\030\003 \002(\004\"!\n\037FinalizeLogSegmentResponsePro" +
26814          "to\"g\n\025PurgeLogsRequestProto\0227\n\007reqInfo\030\001" +
26815          " \002(\0132&.hadoop.hdfs.qjournal.RequestInfoP" +
26816          "roto\022\025\n\rminTxIdToKeep\030\002 \002(\004\"\030\n\026PurgeLogs" +
26817          "ResponseProto\"L\n\027IsFormattedRequestProto",
26818          "\0221\n\003jid\030\001 \002(\0132$.hadoop.hdfs.qjournal.Jou" +
26819          "rnalIdProto\"/\n\030IsFormattedResponseProto\022" +
26820          "\023\n\013isFormatted\030\001 \002(\010\"c\n\033DiscardSegmentsR" +
26821          "equestProto\0221\n\003jid\030\001 \002(\0132$.hadoop.hdfs.q" +
26822          "journal.JournalIdProto\022\021\n\tstartTxId\030\002 \002(" +
26823          "\004\"\036\n\034DiscardSegmentsResponseProto\"P\n\033Get" +
26824          "JournalCTimeRequestProto\0221\n\003jid\030\001 \002(\0132$." +
26825          "hadoop.hdfs.qjournal.JournalIdProto\"3\n\034G" +
26826          "etJournalCTimeResponseProto\022\023\n\013resultCTi" +
26827          "me\030\001 \002(\003\"M\n\030DoPreUpgradeRequestProto\0221\n\003",
26828          "jid\030\001 \002(\0132$.hadoop.hdfs.qjournal.Journal" +
26829          "IdProto\"\033\n\031DoPreUpgradeResponseProto\"x\n\025" +
26830          "DoUpgradeRequestProto\0221\n\003jid\030\001 \002(\0132$.had" +
26831          "oop.hdfs.qjournal.JournalIdProto\022,\n\005sInf" +
26832          "o\030\002 \002(\0132\035.hadoop.hdfs.StorageInfoProto\"\030" +
26833          "\n\026DoUpgradeResponseProto\"K\n\026DoFinalizeRe" +
26834          "questProto\0221\n\003jid\030\001 \002(\0132$.hadoop.hdfs.qj" +
26835          "ournal.JournalIdProto\"\031\n\027DoFinalizeRespo" +
26836          "nseProto\"\315\001\n\027CanRollBackRequestProto\0221\n\003" +
26837          "jid\030\001 \002(\0132$.hadoop.hdfs.qjournal.Journal",
26838          "IdProto\022.\n\007storage\030\002 \002(\0132\035.hadoop.hdfs.S" +
26839          "torageInfoProto\0222\n\013prevStorage\030\003 \002(\0132\035.h" +
26840          "adoop.hdfs.StorageInfoProto\022\033\n\023targetLay" +
26841          "outVersion\030\004 \002(\005\"/\n\030CanRollBackResponseP" +
26842          "roto\022\023\n\013canRollBack\030\001 \002(\010\"K\n\026DoRollbackR" +
26843          "equestProto\0221\n\003jid\030\001 \002(\0132$.hadoop.hdfs.q" +
26844          "journal.JournalIdProto\"\031\n\027DoRollbackResp" +
26845          "onseProto\"P\n\033GetJournalStateRequestProto" +
26846          "\0221\n\003jid\030\001 \002(\0132$.hadoop.hdfs.qjournal.Jou" +
26847          "rnalIdProto\"\\\n\034GetJournalStateResponsePr",
26848          "oto\022\031\n\021lastPromisedEpoch\030\001 \002(\004\022\020\n\010httpPo" +
26849          "rt\030\002 \002(\r\022\017\n\007fromURL\030\003 \001(\t\"x\n\022FormatReque" +
26850          "stProto\0221\n\003jid\030\001 \002(\0132$.hadoop.hdfs.qjour" +
26851          "nal.JournalIdProto\022/\n\006nsInfo\030\002 \002(\0132\037.had" +
26852          "oop.hdfs.NamespaceInfoProto\"\025\n\023FormatRes" +
26853          "ponseProto\"\211\001\n\024NewEpochRequestProto\0221\n\003j" +
26854          "id\030\001 \002(\0132$.hadoop.hdfs.qjournal.JournalI" +
26855          "dProto\022/\n\006nsInfo\030\002 \002(\0132\037.hadoop.hdfs.Nam" +
26856          "espaceInfoProto\022\r\n\005epoch\030\003 \002(\004\"0\n\025NewEpo" +
26857          "chResponseProto\022\027\n\017lastSegmentTxId\030\001 \001(\004",
26858          "\"\203\001\n\036GetEditLogManifestRequestProto\0221\n\003j" +
26859          "id\030\001 \002(\0132$.hadoop.hdfs.qjournal.JournalI" +
26860          "dProto\022\021\n\tsinceTxId\030\002 \002(\004\022\033\n\014inProgressO" +
26861          "k\030\004 \001(\010:\005false\"\177\n\037GetEditLogManifestResp" +
26862          "onseProto\0229\n\010manifest\030\001 \002(\0132\'.hadoop.hdf" +
26863          "s.RemoteEditLogManifestProto\022\020\n\010httpPort" +
26864          "\030\002 \002(\r\022\017\n\007fromURL\030\003 \001(\t\"k\n\033PrepareRecove" +
26865          "ryRequestProto\0227\n\007reqInfo\030\001 \002(\0132&.hadoop" +
26866          ".hdfs.qjournal.RequestInfoProto\022\023\n\013segme" +
26867          "ntTxId\030\002 \002(\004\"\252\001\n\034PrepareRecoveryResponse",
26868          "Proto\022=\n\014segmentState\030\001 \001(\0132\'.hadoop.hdf" +
26869          "s.qjournal.SegmentStateProto\022\027\n\017accepted" +
26870          "InEpoch\030\002 \001(\004\022\027\n\017lastWriterEpoch\030\003 \002(\004\022\031" +
26871          "\n\021lastCommittedTxId\030\004 \001(\004\"\246\001\n\032AcceptReco" +
26872          "veryRequestProto\0227\n\007reqInfo\030\001 \002(\0132&.hado" +
26873          "op.hdfs.qjournal.RequestInfoProto\022>\n\rsta" +
26874          "teToAccept\030\002 \002(\0132\'.hadoop.hdfs.qjournal." +
26875          "SegmentStateProto\022\017\n\007fromURL\030\003 \002(\t\"\035\n\033Ac" +
26876          "ceptRecoveryResponseProto2\373\020\n\027QJournalPr" +
26877          "otocolService\022l\n\013isFormatted\022-.hadoop.hd",
26878          "fs.qjournal.IsFormattedRequestProto\032..ha" +
26879          "doop.hdfs.qjournal.IsFormattedResponsePr" +
26880          "oto\022x\n\017discardSegments\0221.hadoop.hdfs.qjo" +
26881          "urnal.DiscardSegmentsRequestProto\0322.hado" +
26882          "op.hdfs.qjournal.DiscardSegmentsResponse" +
26883          "Proto\022x\n\017getJournalCTime\0221.hadoop.hdfs.q" +
26884          "journal.GetJournalCTimeRequestProto\0322.ha" +
26885          "doop.hdfs.qjournal.GetJournalCTimeRespon" +
26886          "seProto\022o\n\014doPreUpgrade\022..hadoop.hdfs.qj" +
26887          "ournal.DoPreUpgradeRequestProto\032/.hadoop",
26888          ".hdfs.qjournal.DoPreUpgradeResponseProto" +
26889          "\022f\n\tdoUpgrade\022+.hadoop.hdfs.qjournal.DoU" +
26890          "pgradeRequestProto\032,.hadoop.hdfs.qjourna" +
26891          "l.DoUpgradeResponseProto\022i\n\ndoFinalize\022," +
26892          ".hadoop.hdfs.qjournal.DoFinalizeRequestP" +
26893          "roto\032-.hadoop.hdfs.qjournal.DoFinalizeRe" +
26894          "sponseProto\022l\n\013canRollBack\022-.hadoop.hdfs" +
26895          ".qjournal.CanRollBackRequestProto\032..hado" +
26896          "op.hdfs.qjournal.CanRollBackResponseProt" +
26897          "o\022i\n\ndoRollback\022,.hadoop.hdfs.qjournal.D",
26898          "oRollbackRequestProto\032-.hadoop.hdfs.qjou" +
26899          "rnal.DoRollbackResponseProto\022x\n\017getJourn" +
26900          "alState\0221.hadoop.hdfs.qjournal.GetJourna" +
26901          "lStateRequestProto\0322.hadoop.hdfs.qjourna" +
26902          "l.GetJournalStateResponseProto\022c\n\010newEpo" +
26903          "ch\022*.hadoop.hdfs.qjournal.NewEpochReques" +
26904          "tProto\032+.hadoop.hdfs.qjournal.NewEpochRe" +
26905          "sponseProto\022]\n\006format\022(.hadoop.hdfs.qjou" +
26906          "rnal.FormatRequestProto\032).hadoop.hdfs.qj" +
26907          "ournal.FormatResponseProto\022`\n\007journal\022).",
26908          "hadoop.hdfs.qjournal.JournalRequestProto" +
26909          "\032*.hadoop.hdfs.qjournal.JournalResponseP" +
26910          "roto\022f\n\theartbeat\022+.hadoop.hdfs.qjournal" +
26911          ".HeartbeatRequestProto\032,.hadoop.hdfs.qjo" +
26912          "urnal.HeartbeatResponseProto\022x\n\017startLog" +
26913          "Segment\0221.hadoop.hdfs.qjournal.StartLogS" +
26914          "egmentRequestProto\0322.hadoop.hdfs.qjourna" +
26915          "l.StartLogSegmentResponseProto\022\201\001\n\022final" +
26916          "izeLogSegment\0224.hadoop.hdfs.qjournal.Fin" +
26917          "alizeLogSegmentRequestProto\0325.hadoop.hdf",
26918          "s.qjournal.FinalizeLogSegmentResponsePro" +
26919          "to\022f\n\tpurgeLogs\022+.hadoop.hdfs.qjournal.P" +
26920          "urgeLogsRequestProto\032,.hadoop.hdfs.qjour" +
26921          "nal.PurgeLogsResponseProto\022\201\001\n\022getEditLo" +
26922          "gManifest\0224.hadoop.hdfs.qjournal.GetEdit" +
26923          "LogManifestRequestProto\0325.hadoop.hdfs.qj" +
26924          "ournal.GetEditLogManifestResponseProto\022x" +
26925          "\n\017prepareRecovery\0221.hadoop.hdfs.qjournal" +
26926          ".PrepareRecoveryRequestProto\0322.hadoop.hd" +
26927          "fs.qjournal.PrepareRecoveryResponseProto",
26928          "\022u\n\016acceptRecovery\0220.hadoop.hdfs.qjourna" +
26929          "l.AcceptRecoveryRequestProto\0321.hadoop.hd" +
26930          "fs.qjournal.AcceptRecoveryResponseProtoB" +
26931          "H\n(org.apache.hadoop.hdfs.qjournal.proto" +
26932          "colB\026QJournalProtocolProtos\210\001\001\240\001\001"
26933        };
26934        com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
26935          new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
26936            public com.google.protobuf.ExtensionRegistry assignDescriptors(
26937                com.google.protobuf.Descriptors.FileDescriptor root) {
26938              descriptor = root;
26939              internal_static_hadoop_hdfs_qjournal_JournalIdProto_descriptor =
26940                getDescriptor().getMessageTypes().get(0);
26941              internal_static_hadoop_hdfs_qjournal_JournalIdProto_fieldAccessorTable = new
26942                com.google.protobuf.GeneratedMessage.FieldAccessorTable(
26943                  internal_static_hadoop_hdfs_qjournal_JournalIdProto_descriptor,
26944                  new java.lang.String[] { "Identifier", });
26945              internal_static_hadoop_hdfs_qjournal_RequestInfoProto_descriptor =
26946                getDescriptor().getMessageTypes().get(1);
26947              internal_static_hadoop_hdfs_qjournal_RequestInfoProto_fieldAccessorTable = new
26948                com.google.protobuf.GeneratedMessage.FieldAccessorTable(
26949                  internal_static_hadoop_hdfs_qjournal_RequestInfoProto_descriptor,
26950                  new java.lang.String[] { "JournalId", "Epoch", "IpcSerialNumber", "CommittedTxId", });
26951              internal_static_hadoop_hdfs_qjournal_SegmentStateProto_descriptor =
26952                getDescriptor().getMessageTypes().get(2);
26953              internal_static_hadoop_hdfs_qjournal_SegmentStateProto_fieldAccessorTable = new
26954                com.google.protobuf.GeneratedMessage.FieldAccessorTable(
26955                  internal_static_hadoop_hdfs_qjournal_SegmentStateProto_descriptor,
26956                  new java.lang.String[] { "StartTxId", "EndTxId", "IsInProgress", });
26957              internal_static_hadoop_hdfs_qjournal_PersistedRecoveryPaxosData_descriptor =
26958                getDescriptor().getMessageTypes().get(3);
26959              internal_static_hadoop_hdfs_qjournal_PersistedRecoveryPaxosData_fieldAccessorTable = new
26960                com.google.protobuf.GeneratedMessage.FieldAccessorTable(
26961                  internal_static_hadoop_hdfs_qjournal_PersistedRecoveryPaxosData_descriptor,
26962                  new java.lang.String[] { "SegmentState", "AcceptedInEpoch", });
26963              internal_static_hadoop_hdfs_qjournal_JournalRequestProto_descriptor =
26964                getDescriptor().getMessageTypes().get(4);
26965              internal_static_hadoop_hdfs_qjournal_JournalRequestProto_fieldAccessorTable = new
26966                com.google.protobuf.GeneratedMessage.FieldAccessorTable(
26967                  internal_static_hadoop_hdfs_qjournal_JournalRequestProto_descriptor,
26968                  new java.lang.String[] { "ReqInfo", "FirstTxnId", "NumTxns", "Records", "SegmentTxnId", });
26969              internal_static_hadoop_hdfs_qjournal_JournalResponseProto_descriptor =
26970                getDescriptor().getMessageTypes().get(5);
26971              internal_static_hadoop_hdfs_qjournal_JournalResponseProto_fieldAccessorTable = new
26972                com.google.protobuf.GeneratedMessage.FieldAccessorTable(
26973                  internal_static_hadoop_hdfs_qjournal_JournalResponseProto_descriptor,
26974                  new java.lang.String[] { });
26975              internal_static_hadoop_hdfs_qjournal_HeartbeatRequestProto_descriptor =
26976                getDescriptor().getMessageTypes().get(6);
26977              internal_static_hadoop_hdfs_qjournal_HeartbeatRequestProto_fieldAccessorTable = new
26978                com.google.protobuf.GeneratedMessage.FieldAccessorTable(
26979                  internal_static_hadoop_hdfs_qjournal_HeartbeatRequestProto_descriptor,
26980                  new java.lang.String[] { "ReqInfo", });
26981              internal_static_hadoop_hdfs_qjournal_HeartbeatResponseProto_descriptor =
26982                getDescriptor().getMessageTypes().get(7);
26983              internal_static_hadoop_hdfs_qjournal_HeartbeatResponseProto_fieldAccessorTable = new
26984                com.google.protobuf.GeneratedMessage.FieldAccessorTable(
26985                  internal_static_hadoop_hdfs_qjournal_HeartbeatResponseProto_descriptor,
26986                  new java.lang.String[] { });
26987              internal_static_hadoop_hdfs_qjournal_StartLogSegmentRequestProto_descriptor =
26988                getDescriptor().getMessageTypes().get(8);
26989              internal_static_hadoop_hdfs_qjournal_StartLogSegmentRequestProto_fieldAccessorTable = new
26990                com.google.protobuf.GeneratedMessage.FieldAccessorTable(
26991                  internal_static_hadoop_hdfs_qjournal_StartLogSegmentRequestProto_descriptor,
26992                  new java.lang.String[] { "ReqInfo", "Txid", "LayoutVersion", });
26993              internal_static_hadoop_hdfs_qjournal_StartLogSegmentResponseProto_descriptor =
26994                getDescriptor().getMessageTypes().get(9);
26995              internal_static_hadoop_hdfs_qjournal_StartLogSegmentResponseProto_fieldAccessorTable = new
26996                com.google.protobuf.GeneratedMessage.FieldAccessorTable(
26997                  internal_static_hadoop_hdfs_qjournal_StartLogSegmentResponseProto_descriptor,
26998                  new java.lang.String[] { });
26999              internal_static_hadoop_hdfs_qjournal_FinalizeLogSegmentRequestProto_descriptor =
27000                getDescriptor().getMessageTypes().get(10);
27001              internal_static_hadoop_hdfs_qjournal_FinalizeLogSegmentRequestProto_fieldAccessorTable = new
27002                com.google.protobuf.GeneratedMessage.FieldAccessorTable(
27003                  internal_static_hadoop_hdfs_qjournal_FinalizeLogSegmentRequestProto_descriptor,
27004                  new java.lang.String[] { "ReqInfo", "StartTxId", "EndTxId", });
27005              internal_static_hadoop_hdfs_qjournal_FinalizeLogSegmentResponseProto_descriptor =
27006                getDescriptor().getMessageTypes().get(11);
27007              internal_static_hadoop_hdfs_qjournal_FinalizeLogSegmentResponseProto_fieldAccessorTable = new
27008                com.google.protobuf.GeneratedMessage.FieldAccessorTable(
27009                  internal_static_hadoop_hdfs_qjournal_FinalizeLogSegmentResponseProto_descriptor,
27010                  new java.lang.String[] { });
27011              internal_static_hadoop_hdfs_qjournal_PurgeLogsRequestProto_descriptor =
27012                getDescriptor().getMessageTypes().get(12);
27013              internal_static_hadoop_hdfs_qjournal_PurgeLogsRequestProto_fieldAccessorTable = new
27014                com.google.protobuf.GeneratedMessage.FieldAccessorTable(
27015                  internal_static_hadoop_hdfs_qjournal_PurgeLogsRequestProto_descriptor,
27016                  new java.lang.String[] { "ReqInfo", "MinTxIdToKeep", });
27017              internal_static_hadoop_hdfs_qjournal_PurgeLogsResponseProto_descriptor =
27018                getDescriptor().getMessageTypes().get(13);
27019              internal_static_hadoop_hdfs_qjournal_PurgeLogsResponseProto_fieldAccessorTable = new
27020                com.google.protobuf.GeneratedMessage.FieldAccessorTable(
27021                  internal_static_hadoop_hdfs_qjournal_PurgeLogsResponseProto_descriptor,
27022                  new java.lang.String[] { });
27023              internal_static_hadoop_hdfs_qjournal_IsFormattedRequestProto_descriptor =
27024                getDescriptor().getMessageTypes().get(14);
27025              internal_static_hadoop_hdfs_qjournal_IsFormattedRequestProto_fieldAccessorTable = new
27026                com.google.protobuf.GeneratedMessage.FieldAccessorTable(
27027                  internal_static_hadoop_hdfs_qjournal_IsFormattedRequestProto_descriptor,
27028                  new java.lang.String[] { "Jid", });
27029              internal_static_hadoop_hdfs_qjournal_IsFormattedResponseProto_descriptor =
27030                getDescriptor().getMessageTypes().get(15);
27031              internal_static_hadoop_hdfs_qjournal_IsFormattedResponseProto_fieldAccessorTable = new
27032                com.google.protobuf.GeneratedMessage.FieldAccessorTable(
27033                  internal_static_hadoop_hdfs_qjournal_IsFormattedResponseProto_descriptor,
27034                  new java.lang.String[] { "IsFormatted", });
27035              internal_static_hadoop_hdfs_qjournal_DiscardSegmentsRequestProto_descriptor =
27036                getDescriptor().getMessageTypes().get(16);
27037              internal_static_hadoop_hdfs_qjournal_DiscardSegmentsRequestProto_fieldAccessorTable = new
27038                com.google.protobuf.GeneratedMessage.FieldAccessorTable(
27039                  internal_static_hadoop_hdfs_qjournal_DiscardSegmentsRequestProto_descriptor,
27040                  new java.lang.String[] { "Jid", "StartTxId", });
27041              internal_static_hadoop_hdfs_qjournal_DiscardSegmentsResponseProto_descriptor =
27042                getDescriptor().getMessageTypes().get(17);
27043              internal_static_hadoop_hdfs_qjournal_DiscardSegmentsResponseProto_fieldAccessorTable = new
27044                com.google.protobuf.GeneratedMessage.FieldAccessorTable(
27045                  internal_static_hadoop_hdfs_qjournal_DiscardSegmentsResponseProto_descriptor,
27046                  new java.lang.String[] { });
27047              internal_static_hadoop_hdfs_qjournal_GetJournalCTimeRequestProto_descriptor =
27048                getDescriptor().getMessageTypes().get(18);
27049              internal_static_hadoop_hdfs_qjournal_GetJournalCTimeRequestProto_fieldAccessorTable = new
27050                com.google.protobuf.GeneratedMessage.FieldAccessorTable(
27051                  internal_static_hadoop_hdfs_qjournal_GetJournalCTimeRequestProto_descriptor,
27052                  new java.lang.String[] { "Jid", });
27053              internal_static_hadoop_hdfs_qjournal_GetJournalCTimeResponseProto_descriptor =
27054                getDescriptor().getMessageTypes().get(19);
27055              internal_static_hadoop_hdfs_qjournal_GetJournalCTimeResponseProto_fieldAccessorTable = new
27056                com.google.protobuf.GeneratedMessage.FieldAccessorTable(
27057                  internal_static_hadoop_hdfs_qjournal_GetJournalCTimeResponseProto_descriptor,
27058                  new java.lang.String[] { "ResultCTime", });
27059              internal_static_hadoop_hdfs_qjournal_DoPreUpgradeRequestProto_descriptor =
27060                getDescriptor().getMessageTypes().get(20);
27061              internal_static_hadoop_hdfs_qjournal_DoPreUpgradeRequestProto_fieldAccessorTable = new
27062                com.google.protobuf.GeneratedMessage.FieldAccessorTable(
27063                  internal_static_hadoop_hdfs_qjournal_DoPreUpgradeRequestProto_descriptor,
27064                  new java.lang.String[] { "Jid", });
27065              internal_static_hadoop_hdfs_qjournal_DoPreUpgradeResponseProto_descriptor =
27066                getDescriptor().getMessageTypes().get(21);
27067              internal_static_hadoop_hdfs_qjournal_DoPreUpgradeResponseProto_fieldAccessorTable = new
27068                com.google.protobuf.GeneratedMessage.FieldAccessorTable(
27069                  internal_static_hadoop_hdfs_qjournal_DoPreUpgradeResponseProto_descriptor,
27070                  new java.lang.String[] { });
27071              internal_static_hadoop_hdfs_qjournal_DoUpgradeRequestProto_descriptor =
27072                getDescriptor().getMessageTypes().get(22);
27073              internal_static_hadoop_hdfs_qjournal_DoUpgradeRequestProto_fieldAccessorTable = new
27074                com.google.protobuf.GeneratedMessage.FieldAccessorTable(
27075                  internal_static_hadoop_hdfs_qjournal_DoUpgradeRequestProto_descriptor,
27076                  new java.lang.String[] { "Jid", "SInfo", });
27077              internal_static_hadoop_hdfs_qjournal_DoUpgradeResponseProto_descriptor =
27078                getDescriptor().getMessageTypes().get(23);
27079              internal_static_hadoop_hdfs_qjournal_DoUpgradeResponseProto_fieldAccessorTable = new
27080                com.google.protobuf.GeneratedMessage.FieldAccessorTable(
27081                  internal_static_hadoop_hdfs_qjournal_DoUpgradeResponseProto_descriptor,
27082                  new java.lang.String[] { });
27083              internal_static_hadoop_hdfs_qjournal_DoFinalizeRequestProto_descriptor =
27084                getDescriptor().getMessageTypes().get(24);
27085              internal_static_hadoop_hdfs_qjournal_DoFinalizeRequestProto_fieldAccessorTable = new
27086                com.google.protobuf.GeneratedMessage.FieldAccessorTable(
27087                  internal_static_hadoop_hdfs_qjournal_DoFinalizeRequestProto_descriptor,
27088                  new java.lang.String[] { "Jid", });
27089              internal_static_hadoop_hdfs_qjournal_DoFinalizeResponseProto_descriptor =
27090                getDescriptor().getMessageTypes().get(25);
27091              internal_static_hadoop_hdfs_qjournal_DoFinalizeResponseProto_fieldAccessorTable = new
27092                com.google.protobuf.GeneratedMessage.FieldAccessorTable(
27093                  internal_static_hadoop_hdfs_qjournal_DoFinalizeResponseProto_descriptor,
27094                  new java.lang.String[] { });
27095              internal_static_hadoop_hdfs_qjournal_CanRollBackRequestProto_descriptor =
27096                getDescriptor().getMessageTypes().get(26);
27097              internal_static_hadoop_hdfs_qjournal_CanRollBackRequestProto_fieldAccessorTable = new
27098                com.google.protobuf.GeneratedMessage.FieldAccessorTable(
27099                  internal_static_hadoop_hdfs_qjournal_CanRollBackRequestProto_descriptor,
27100                  new java.lang.String[] { "Jid", "Storage", "PrevStorage", "TargetLayoutVersion", });
27101              internal_static_hadoop_hdfs_qjournal_CanRollBackResponseProto_descriptor =
27102                getDescriptor().getMessageTypes().get(27);
27103              internal_static_hadoop_hdfs_qjournal_CanRollBackResponseProto_fieldAccessorTable = new
27104                com.google.protobuf.GeneratedMessage.FieldAccessorTable(
27105                  internal_static_hadoop_hdfs_qjournal_CanRollBackResponseProto_descriptor,
27106                  new java.lang.String[] { "CanRollBack", });
27107              internal_static_hadoop_hdfs_qjournal_DoRollbackRequestProto_descriptor =
27108                getDescriptor().getMessageTypes().get(28);
27109              internal_static_hadoop_hdfs_qjournal_DoRollbackRequestProto_fieldAccessorTable = new
27110                com.google.protobuf.GeneratedMessage.FieldAccessorTable(
27111                  internal_static_hadoop_hdfs_qjournal_DoRollbackRequestProto_descriptor,
27112                  new java.lang.String[] { "Jid", });
27113              internal_static_hadoop_hdfs_qjournal_DoRollbackResponseProto_descriptor =
27114                getDescriptor().getMessageTypes().get(29);
27115              internal_static_hadoop_hdfs_qjournal_DoRollbackResponseProto_fieldAccessorTable = new
27116                com.google.protobuf.GeneratedMessage.FieldAccessorTable(
27117                  internal_static_hadoop_hdfs_qjournal_DoRollbackResponseProto_descriptor,
27118                  new java.lang.String[] { });
27119              internal_static_hadoop_hdfs_qjournal_GetJournalStateRequestProto_descriptor =
27120                getDescriptor().getMessageTypes().get(30);
27121              internal_static_hadoop_hdfs_qjournal_GetJournalStateRequestProto_fieldAccessorTable = new
27122                com.google.protobuf.GeneratedMessage.FieldAccessorTable(
27123                  internal_static_hadoop_hdfs_qjournal_GetJournalStateRequestProto_descriptor,
27124                  new java.lang.String[] { "Jid", });
27125              internal_static_hadoop_hdfs_qjournal_GetJournalStateResponseProto_descriptor =
27126                getDescriptor().getMessageTypes().get(31);
27127              internal_static_hadoop_hdfs_qjournal_GetJournalStateResponseProto_fieldAccessorTable = new
27128                com.google.protobuf.GeneratedMessage.FieldAccessorTable(
27129                  internal_static_hadoop_hdfs_qjournal_GetJournalStateResponseProto_descriptor,
27130                  new java.lang.String[] { "LastPromisedEpoch", "HttpPort", "FromURL", });
27131              internal_static_hadoop_hdfs_qjournal_FormatRequestProto_descriptor =
27132                getDescriptor().getMessageTypes().get(32);
27133              internal_static_hadoop_hdfs_qjournal_FormatRequestProto_fieldAccessorTable = new
27134                com.google.protobuf.GeneratedMessage.FieldAccessorTable(
27135                  internal_static_hadoop_hdfs_qjournal_FormatRequestProto_descriptor,
27136                  new java.lang.String[] { "Jid", "NsInfo", });
27137              internal_static_hadoop_hdfs_qjournal_FormatResponseProto_descriptor =
27138                getDescriptor().getMessageTypes().get(33);
27139              internal_static_hadoop_hdfs_qjournal_FormatResponseProto_fieldAccessorTable = new
27140                com.google.protobuf.GeneratedMessage.FieldAccessorTable(
27141                  internal_static_hadoop_hdfs_qjournal_FormatResponseProto_descriptor,
27142                  new java.lang.String[] { });
27143              internal_static_hadoop_hdfs_qjournal_NewEpochRequestProto_descriptor =
27144                getDescriptor().getMessageTypes().get(34);
27145              internal_static_hadoop_hdfs_qjournal_NewEpochRequestProto_fieldAccessorTable = new
27146                com.google.protobuf.GeneratedMessage.FieldAccessorTable(
27147                  internal_static_hadoop_hdfs_qjournal_NewEpochRequestProto_descriptor,
27148                  new java.lang.String[] { "Jid", "NsInfo", "Epoch", });
27149              internal_static_hadoop_hdfs_qjournal_NewEpochResponseProto_descriptor =
27150                getDescriptor().getMessageTypes().get(35);
27151              internal_static_hadoop_hdfs_qjournal_NewEpochResponseProto_fieldAccessorTable = new
27152                com.google.protobuf.GeneratedMessage.FieldAccessorTable(
27153                  internal_static_hadoop_hdfs_qjournal_NewEpochResponseProto_descriptor,
27154                  new java.lang.String[] { "LastSegmentTxId", });
27155              internal_static_hadoop_hdfs_qjournal_GetEditLogManifestRequestProto_descriptor =
27156                getDescriptor().getMessageTypes().get(36);
27157              internal_static_hadoop_hdfs_qjournal_GetEditLogManifestRequestProto_fieldAccessorTable = new
27158                com.google.protobuf.GeneratedMessage.FieldAccessorTable(
27159                  internal_static_hadoop_hdfs_qjournal_GetEditLogManifestRequestProto_descriptor,
27160                  new java.lang.String[] { "Jid", "SinceTxId", "InProgressOk", });
27161              internal_static_hadoop_hdfs_qjournal_GetEditLogManifestResponseProto_descriptor =
27162                getDescriptor().getMessageTypes().get(37);
27163              internal_static_hadoop_hdfs_qjournal_GetEditLogManifestResponseProto_fieldAccessorTable = new
27164                com.google.protobuf.GeneratedMessage.FieldAccessorTable(
27165                  internal_static_hadoop_hdfs_qjournal_GetEditLogManifestResponseProto_descriptor,
27166                  new java.lang.String[] { "Manifest", "HttpPort", "FromURL", });
27167              internal_static_hadoop_hdfs_qjournal_PrepareRecoveryRequestProto_descriptor =
27168                getDescriptor().getMessageTypes().get(38);
27169              internal_static_hadoop_hdfs_qjournal_PrepareRecoveryRequestProto_fieldAccessorTable = new
27170                com.google.protobuf.GeneratedMessage.FieldAccessorTable(
27171                  internal_static_hadoop_hdfs_qjournal_PrepareRecoveryRequestProto_descriptor,
27172                  new java.lang.String[] { "ReqInfo", "SegmentTxId", });
27173              internal_static_hadoop_hdfs_qjournal_PrepareRecoveryResponseProto_descriptor =
27174                getDescriptor().getMessageTypes().get(39);
27175              internal_static_hadoop_hdfs_qjournal_PrepareRecoveryResponseProto_fieldAccessorTable = new
27176                com.google.protobuf.GeneratedMessage.FieldAccessorTable(
27177                  internal_static_hadoop_hdfs_qjournal_PrepareRecoveryResponseProto_descriptor,
27178                  new java.lang.String[] { "SegmentState", "AcceptedInEpoch", "LastWriterEpoch", "LastCommittedTxId", });
27179              internal_static_hadoop_hdfs_qjournal_AcceptRecoveryRequestProto_descriptor =
27180                getDescriptor().getMessageTypes().get(40);
27181              internal_static_hadoop_hdfs_qjournal_AcceptRecoveryRequestProto_fieldAccessorTable = new
27182                com.google.protobuf.GeneratedMessage.FieldAccessorTable(
27183                  internal_static_hadoop_hdfs_qjournal_AcceptRecoveryRequestProto_descriptor,
27184                  new java.lang.String[] { "ReqInfo", "StateToAccept", "FromURL", });
27185              internal_static_hadoop_hdfs_qjournal_AcceptRecoveryResponseProto_descriptor =
27186                getDescriptor().getMessageTypes().get(41);
27187              internal_static_hadoop_hdfs_qjournal_AcceptRecoveryResponseProto_fieldAccessorTable = new
27188                com.google.protobuf.GeneratedMessage.FieldAccessorTable(
27189                  internal_static_hadoop_hdfs_qjournal_AcceptRecoveryResponseProto_descriptor,
27190                  new java.lang.String[] { });
27191              return null;
27192            }
27193          };
27194        com.google.protobuf.Descriptors.FileDescriptor
27195          .internalBuildGeneratedFileFrom(descriptorData,
27196            new com.google.protobuf.Descriptors.FileDescriptor[] {
27197              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.getDescriptor(),
27198            }, assigner);
27199      }
27200    
27201      // @@protoc_insertion_point(outer_class_scope)
27202    }