001    // Generated by the protocol buffer compiler.  DO NOT EDIT!
002    // source: QJournalProtocol.proto
003    
004    package org.apache.hadoop.hdfs.qjournal.protocol;
005    
006    public final class QJournalProtocolProtos {
007      private QJournalProtocolProtos() {}
008      public static void registerAllExtensions(
009          com.google.protobuf.ExtensionRegistry registry) {
010      }
011      public interface JournalIdProtoOrBuilder
012          extends com.google.protobuf.MessageOrBuilder {
013        
014        // required string identifier = 1;
015        boolean hasIdentifier();
016        String getIdentifier();
017      }
018      public static final class JournalIdProto extends
019          com.google.protobuf.GeneratedMessage
020          implements JournalIdProtoOrBuilder {
021        // Use JournalIdProto.newBuilder() to construct.
022        private JournalIdProto(Builder builder) {
023          super(builder);
024        }
025        private JournalIdProto(boolean noInit) {}
026        
027        private static final JournalIdProto defaultInstance;
028        public static JournalIdProto getDefaultInstance() {
029          return defaultInstance;
030        }
031        
032        public JournalIdProto getDefaultInstanceForType() {
033          return defaultInstance;
034        }
035        
036        public static final com.google.protobuf.Descriptors.Descriptor
037            getDescriptor() {
038          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_JournalIdProto_descriptor;
039        }
040        
041        protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
042            internalGetFieldAccessorTable() {
043          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_JournalIdProto_fieldAccessorTable;
044        }
045        
046        private int bitField0_;
047        // required string identifier = 1;
048        public static final int IDENTIFIER_FIELD_NUMBER = 1;
049        private java.lang.Object identifier_;
050        public boolean hasIdentifier() {
051          return ((bitField0_ & 0x00000001) == 0x00000001);
052        }
053        public String getIdentifier() {
054          java.lang.Object ref = identifier_;
055          if (ref instanceof String) {
056            return (String) ref;
057          } else {
058            com.google.protobuf.ByteString bs = 
059                (com.google.protobuf.ByteString) ref;
060            String s = bs.toStringUtf8();
061            if (com.google.protobuf.Internal.isValidUtf8(bs)) {
062              identifier_ = s;
063            }
064            return s;
065          }
066        }
067        private com.google.protobuf.ByteString getIdentifierBytes() {
068          java.lang.Object ref = identifier_;
069          if (ref instanceof String) {
070            com.google.protobuf.ByteString b = 
071                com.google.protobuf.ByteString.copyFromUtf8((String) ref);
072            identifier_ = b;
073            return b;
074          } else {
075            return (com.google.protobuf.ByteString) ref;
076          }
077        }
078        
079        private void initFields() {
080          identifier_ = "";
081        }
082        private byte memoizedIsInitialized = -1;
083        public final boolean isInitialized() {
084          byte isInitialized = memoizedIsInitialized;
085          if (isInitialized != -1) return isInitialized == 1;
086          
087          if (!hasIdentifier()) {
088            memoizedIsInitialized = 0;
089            return false;
090          }
091          memoizedIsInitialized = 1;
092          return true;
093        }
094        
095        public void writeTo(com.google.protobuf.CodedOutputStream output)
096                            throws java.io.IOException {
097          getSerializedSize();
098          if (((bitField0_ & 0x00000001) == 0x00000001)) {
099            output.writeBytes(1, getIdentifierBytes());
100          }
101          getUnknownFields().writeTo(output);
102        }
103        
104        private int memoizedSerializedSize = -1;
105        public int getSerializedSize() {
106          int size = memoizedSerializedSize;
107          if (size != -1) return size;
108        
109          size = 0;
110          if (((bitField0_ & 0x00000001) == 0x00000001)) {
111            size += com.google.protobuf.CodedOutputStream
112              .computeBytesSize(1, getIdentifierBytes());
113          }
114          size += getUnknownFields().getSerializedSize();
115          memoizedSerializedSize = size;
116          return size;
117        }
118        
119        private static final long serialVersionUID = 0L;
120        @java.lang.Override
121        protected java.lang.Object writeReplace()
122            throws java.io.ObjectStreamException {
123          return super.writeReplace();
124        }
125        
126        @java.lang.Override
127        public boolean equals(final java.lang.Object obj) {
128          if (obj == this) {
129           return true;
130          }
131          if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto)) {
132            return super.equals(obj);
133          }
134          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto) obj;
135          
136          boolean result = true;
137          result = result && (hasIdentifier() == other.hasIdentifier());
138          if (hasIdentifier()) {
139            result = result && getIdentifier()
140                .equals(other.getIdentifier());
141          }
142          result = result &&
143              getUnknownFields().equals(other.getUnknownFields());
144          return result;
145        }
146        
147        @java.lang.Override
148        public int hashCode() {
149          int hash = 41;
150          hash = (19 * hash) + getDescriptorForType().hashCode();
151          if (hasIdentifier()) {
152            hash = (37 * hash) + IDENTIFIER_FIELD_NUMBER;
153            hash = (53 * hash) + getIdentifier().hashCode();
154          }
155          hash = (29 * hash) + getUnknownFields().hashCode();
156          return hash;
157        }
158        
159        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto parseFrom(
160            com.google.protobuf.ByteString data)
161            throws com.google.protobuf.InvalidProtocolBufferException {
162          return newBuilder().mergeFrom(data).buildParsed();
163        }
164        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto parseFrom(
165            com.google.protobuf.ByteString data,
166            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
167            throws com.google.protobuf.InvalidProtocolBufferException {
168          return newBuilder().mergeFrom(data, extensionRegistry)
169                   .buildParsed();
170        }
171        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto parseFrom(byte[] data)
172            throws com.google.protobuf.InvalidProtocolBufferException {
173          return newBuilder().mergeFrom(data).buildParsed();
174        }
175        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto parseFrom(
176            byte[] data,
177            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
178            throws com.google.protobuf.InvalidProtocolBufferException {
179          return newBuilder().mergeFrom(data, extensionRegistry)
180                   .buildParsed();
181        }
182        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto parseFrom(java.io.InputStream input)
183            throws java.io.IOException {
184          return newBuilder().mergeFrom(input).buildParsed();
185        }
186        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto parseFrom(
187            java.io.InputStream input,
188            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
189            throws java.io.IOException {
190          return newBuilder().mergeFrom(input, extensionRegistry)
191                   .buildParsed();
192        }
193        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto parseDelimitedFrom(java.io.InputStream input)
194            throws java.io.IOException {
195          Builder builder = newBuilder();
196          if (builder.mergeDelimitedFrom(input)) {
197            return builder.buildParsed();
198          } else {
199            return null;
200          }
201        }
202        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto parseDelimitedFrom(
203            java.io.InputStream input,
204            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
205            throws java.io.IOException {
206          Builder builder = newBuilder();
207          if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
208            return builder.buildParsed();
209          } else {
210            return null;
211          }
212        }
213        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto parseFrom(
214            com.google.protobuf.CodedInputStream input)
215            throws java.io.IOException {
216          return newBuilder().mergeFrom(input).buildParsed();
217        }
218        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto parseFrom(
219            com.google.protobuf.CodedInputStream input,
220            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
221            throws java.io.IOException {
222          return newBuilder().mergeFrom(input, extensionRegistry)
223                   .buildParsed();
224        }
225        
226        public static Builder newBuilder() { return Builder.create(); }
227        public Builder newBuilderForType() { return newBuilder(); }
228        public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto prototype) {
229          return newBuilder().mergeFrom(prototype);
230        }
231        public Builder toBuilder() { return newBuilder(this); }
232        
233        @java.lang.Override
234        protected Builder newBuilderForType(
235            com.google.protobuf.GeneratedMessage.BuilderParent parent) {
236          Builder builder = new Builder(parent);
237          return builder;
238        }
239        public static final class Builder extends
240            com.google.protobuf.GeneratedMessage.Builder<Builder>
241           implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder {
242          public static final com.google.protobuf.Descriptors.Descriptor
243              getDescriptor() {
244            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_JournalIdProto_descriptor;
245          }
246          
247          protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
248              internalGetFieldAccessorTable() {
249            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_JournalIdProto_fieldAccessorTable;
250          }
251          
252          // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.newBuilder()
253          private Builder() {
254            maybeForceBuilderInitialization();
255          }
256          
257          private Builder(BuilderParent parent) {
258            super(parent);
259            maybeForceBuilderInitialization();
260          }
261          private void maybeForceBuilderInitialization() {
262            if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
263            }
264          }
265          private static Builder create() {
266            return new Builder();
267          }
268          
269          public Builder clear() {
270            super.clear();
271            identifier_ = "";
272            bitField0_ = (bitField0_ & ~0x00000001);
273            return this;
274          }
275          
276          public Builder clone() {
277            return create().mergeFrom(buildPartial());
278          }
279          
280          public com.google.protobuf.Descriptors.Descriptor
281              getDescriptorForType() {
282            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDescriptor();
283          }
284          
285          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getDefaultInstanceForType() {
286            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
287          }
288          
289          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto build() {
290            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto result = buildPartial();
291            if (!result.isInitialized()) {
292              throw newUninitializedMessageException(result);
293            }
294            return result;
295          }
296          
297          private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto buildParsed()
298              throws com.google.protobuf.InvalidProtocolBufferException {
299            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto result = buildPartial();
300            if (!result.isInitialized()) {
301              throw newUninitializedMessageException(
302                result).asInvalidProtocolBufferException();
303            }
304            return result;
305          }
306          
307          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto buildPartial() {
308            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto(this);
309            int from_bitField0_ = bitField0_;
310            int to_bitField0_ = 0;
311            if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
312              to_bitField0_ |= 0x00000001;
313            }
314            result.identifier_ = identifier_;
315            result.bitField0_ = to_bitField0_;
316            onBuilt();
317            return result;
318          }
319          
320          public Builder mergeFrom(com.google.protobuf.Message other) {
321            if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto) {
322              return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto)other);
323            } else {
324              super.mergeFrom(other);
325              return this;
326            }
327          }
328          
329          public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto other) {
330            if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance()) return this;
331            if (other.hasIdentifier()) {
332              setIdentifier(other.getIdentifier());
333            }
334            this.mergeUnknownFields(other.getUnknownFields());
335            return this;
336          }
337          
338          public final boolean isInitialized() {
339            if (!hasIdentifier()) {
340              
341              return false;
342            }
343            return true;
344          }
345          
346          public Builder mergeFrom(
347              com.google.protobuf.CodedInputStream input,
348              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
349              throws java.io.IOException {
350            com.google.protobuf.UnknownFieldSet.Builder unknownFields =
351              com.google.protobuf.UnknownFieldSet.newBuilder(
352                this.getUnknownFields());
353            while (true) {
354              int tag = input.readTag();
355              switch (tag) {
356                case 0:
357                  this.setUnknownFields(unknownFields.build());
358                  onChanged();
359                  return this;
360                default: {
361                  if (!parseUnknownField(input, unknownFields,
362                                         extensionRegistry, tag)) {
363                    this.setUnknownFields(unknownFields.build());
364                    onChanged();
365                    return this;
366                  }
367                  break;
368                }
369                case 10: {
370                  bitField0_ |= 0x00000001;
371                  identifier_ = input.readBytes();
372                  break;
373                }
374              }
375            }
376          }
377          
378          private int bitField0_;
379          
380          // required string identifier = 1;
381          private java.lang.Object identifier_ = "";
382          public boolean hasIdentifier() {
383            return ((bitField0_ & 0x00000001) == 0x00000001);
384          }
385          public String getIdentifier() {
386            java.lang.Object ref = identifier_;
387            if (!(ref instanceof String)) {
388              String s = ((com.google.protobuf.ByteString) ref).toStringUtf8();
389              identifier_ = s;
390              return s;
391            } else {
392              return (String) ref;
393            }
394          }
395          public Builder setIdentifier(String value) {
396            if (value == null) {
397        throw new NullPointerException();
398      }
399      bitField0_ |= 0x00000001;
400            identifier_ = value;
401            onChanged();
402            return this;
403          }
404          public Builder clearIdentifier() {
405            bitField0_ = (bitField0_ & ~0x00000001);
406            identifier_ = getDefaultInstance().getIdentifier();
407            onChanged();
408            return this;
409          }
410          void setIdentifier(com.google.protobuf.ByteString value) {
411            bitField0_ |= 0x00000001;
412            identifier_ = value;
413            onChanged();
414          }
415          
416          // @@protoc_insertion_point(builder_scope:hadoop.hdfs.JournalIdProto)
417        }
418        
419        static {
420          defaultInstance = new JournalIdProto(true);
421          defaultInstance.initFields();
422        }
423        
424        // @@protoc_insertion_point(class_scope:hadoop.hdfs.JournalIdProto)
425      }
426      
427      public interface RequestInfoProtoOrBuilder
428          extends com.google.protobuf.MessageOrBuilder {
429        
430        // required .hadoop.hdfs.JournalIdProto journalId = 1;
431        boolean hasJournalId();
432        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJournalId();
433        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJournalIdOrBuilder();
434        
435        // required uint64 epoch = 2;
436        boolean hasEpoch();
437        long getEpoch();
438        
439        // required uint64 ipcSerialNumber = 3;
440        boolean hasIpcSerialNumber();
441        long getIpcSerialNumber();
442        
443        // optional uint64 committedTxId = 4;
444        boolean hasCommittedTxId();
445        long getCommittedTxId();
446      }
447      public static final class RequestInfoProto extends
448          com.google.protobuf.GeneratedMessage
449          implements RequestInfoProtoOrBuilder {
450        // Use RequestInfoProto.newBuilder() to construct.
451        private RequestInfoProto(Builder builder) {
452          super(builder);
453        }
454        private RequestInfoProto(boolean noInit) {}
455        
456        private static final RequestInfoProto defaultInstance;
457        public static RequestInfoProto getDefaultInstance() {
458          return defaultInstance;
459        }
460        
461        public RequestInfoProto getDefaultInstanceForType() {
462          return defaultInstance;
463        }
464        
465        public static final com.google.protobuf.Descriptors.Descriptor
466            getDescriptor() {
467          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_RequestInfoProto_descriptor;
468        }
469        
470        protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
471            internalGetFieldAccessorTable() {
472          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_RequestInfoProto_fieldAccessorTable;
473        }
474        
475        private int bitField0_;
476        // required .hadoop.hdfs.JournalIdProto journalId = 1;
477        public static final int JOURNALID_FIELD_NUMBER = 1;
478        private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto journalId_;
479        public boolean hasJournalId() {
480          return ((bitField0_ & 0x00000001) == 0x00000001);
481        }
482        public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJournalId() {
483          return journalId_;
484        }
485        public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJournalIdOrBuilder() {
486          return journalId_;
487        }
488        
489        // required uint64 epoch = 2;
490        public static final int EPOCH_FIELD_NUMBER = 2;
491        private long epoch_;
492        public boolean hasEpoch() {
493          return ((bitField0_ & 0x00000002) == 0x00000002);
494        }
495        public long getEpoch() {
496          return epoch_;
497        }
498        
499        // required uint64 ipcSerialNumber = 3;
500        public static final int IPCSERIALNUMBER_FIELD_NUMBER = 3;
501        private long ipcSerialNumber_;
502        public boolean hasIpcSerialNumber() {
503          return ((bitField0_ & 0x00000004) == 0x00000004);
504        }
505        public long getIpcSerialNumber() {
506          return ipcSerialNumber_;
507        }
508        
509        // optional uint64 committedTxId = 4;
510        public static final int COMMITTEDTXID_FIELD_NUMBER = 4;
511        private long committedTxId_;
512        public boolean hasCommittedTxId() {
513          return ((bitField0_ & 0x00000008) == 0x00000008);
514        }
515        public long getCommittedTxId() {
516          return committedTxId_;
517        }
518        
519        private void initFields() {
520          journalId_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
521          epoch_ = 0L;
522          ipcSerialNumber_ = 0L;
523          committedTxId_ = 0L;
524        }
525        private byte memoizedIsInitialized = -1;
526        public final boolean isInitialized() {
527          byte isInitialized = memoizedIsInitialized;
528          if (isInitialized != -1) return isInitialized == 1;
529          
530          if (!hasJournalId()) {
531            memoizedIsInitialized = 0;
532            return false;
533          }
534          if (!hasEpoch()) {
535            memoizedIsInitialized = 0;
536            return false;
537          }
538          if (!hasIpcSerialNumber()) {
539            memoizedIsInitialized = 0;
540            return false;
541          }
542          if (!getJournalId().isInitialized()) {
543            memoizedIsInitialized = 0;
544            return false;
545          }
546          memoizedIsInitialized = 1;
547          return true;
548        }
549        
550        public void writeTo(com.google.protobuf.CodedOutputStream output)
551                            throws java.io.IOException {
552          getSerializedSize();
553          if (((bitField0_ & 0x00000001) == 0x00000001)) {
554            output.writeMessage(1, journalId_);
555          }
556          if (((bitField0_ & 0x00000002) == 0x00000002)) {
557            output.writeUInt64(2, epoch_);
558          }
559          if (((bitField0_ & 0x00000004) == 0x00000004)) {
560            output.writeUInt64(3, ipcSerialNumber_);
561          }
562          if (((bitField0_ & 0x00000008) == 0x00000008)) {
563            output.writeUInt64(4, committedTxId_);
564          }
565          getUnknownFields().writeTo(output);
566        }
567        
568        private int memoizedSerializedSize = -1;
569        public int getSerializedSize() {
570          int size = memoizedSerializedSize;
571          if (size != -1) return size;
572        
573          size = 0;
574          if (((bitField0_ & 0x00000001) == 0x00000001)) {
575            size += com.google.protobuf.CodedOutputStream
576              .computeMessageSize(1, journalId_);
577          }
578          if (((bitField0_ & 0x00000002) == 0x00000002)) {
579            size += com.google.protobuf.CodedOutputStream
580              .computeUInt64Size(2, epoch_);
581          }
582          if (((bitField0_ & 0x00000004) == 0x00000004)) {
583            size += com.google.protobuf.CodedOutputStream
584              .computeUInt64Size(3, ipcSerialNumber_);
585          }
586          if (((bitField0_ & 0x00000008) == 0x00000008)) {
587            size += com.google.protobuf.CodedOutputStream
588              .computeUInt64Size(4, committedTxId_);
589          }
590          size += getUnknownFields().getSerializedSize();
591          memoizedSerializedSize = size;
592          return size;
593        }
594        
595        private static final long serialVersionUID = 0L;
596        @java.lang.Override
597        protected java.lang.Object writeReplace()
598            throws java.io.ObjectStreamException {
599          return super.writeReplace();
600        }
601        
602        @java.lang.Override
603        public boolean equals(final java.lang.Object obj) {
604          if (obj == this) {
605           return true;
606          }
607          if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto)) {
608            return super.equals(obj);
609          }
610          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto) obj;
611          
612          boolean result = true;
613          result = result && (hasJournalId() == other.hasJournalId());
614          if (hasJournalId()) {
615            result = result && getJournalId()
616                .equals(other.getJournalId());
617          }
618          result = result && (hasEpoch() == other.hasEpoch());
619          if (hasEpoch()) {
620            result = result && (getEpoch()
621                == other.getEpoch());
622          }
623          result = result && (hasIpcSerialNumber() == other.hasIpcSerialNumber());
624          if (hasIpcSerialNumber()) {
625            result = result && (getIpcSerialNumber()
626                == other.getIpcSerialNumber());
627          }
628          result = result && (hasCommittedTxId() == other.hasCommittedTxId());
629          if (hasCommittedTxId()) {
630            result = result && (getCommittedTxId()
631                == other.getCommittedTxId());
632          }
633          result = result &&
634              getUnknownFields().equals(other.getUnknownFields());
635          return result;
636        }
637        
638        @java.lang.Override
639        public int hashCode() {
640          int hash = 41;
641          hash = (19 * hash) + getDescriptorForType().hashCode();
642          if (hasJournalId()) {
643            hash = (37 * hash) + JOURNALID_FIELD_NUMBER;
644            hash = (53 * hash) + getJournalId().hashCode();
645          }
646          if (hasEpoch()) {
647            hash = (37 * hash) + EPOCH_FIELD_NUMBER;
648            hash = (53 * hash) + hashLong(getEpoch());
649          }
650          if (hasIpcSerialNumber()) {
651            hash = (37 * hash) + IPCSERIALNUMBER_FIELD_NUMBER;
652            hash = (53 * hash) + hashLong(getIpcSerialNumber());
653          }
654          if (hasCommittedTxId()) {
655            hash = (37 * hash) + COMMITTEDTXID_FIELD_NUMBER;
656            hash = (53 * hash) + hashLong(getCommittedTxId());
657          }
658          hash = (29 * hash) + getUnknownFields().hashCode();
659          return hash;
660        }
661        
662        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto parseFrom(
663            com.google.protobuf.ByteString data)
664            throws com.google.protobuf.InvalidProtocolBufferException {
665          return newBuilder().mergeFrom(data).buildParsed();
666        }
667        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto parseFrom(
668            com.google.protobuf.ByteString data,
669            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
670            throws com.google.protobuf.InvalidProtocolBufferException {
671          return newBuilder().mergeFrom(data, extensionRegistry)
672                   .buildParsed();
673        }
674        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto parseFrom(byte[] data)
675            throws com.google.protobuf.InvalidProtocolBufferException {
676          return newBuilder().mergeFrom(data).buildParsed();
677        }
678        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto parseFrom(
679            byte[] data,
680            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
681            throws com.google.protobuf.InvalidProtocolBufferException {
682          return newBuilder().mergeFrom(data, extensionRegistry)
683                   .buildParsed();
684        }
685        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto parseFrom(java.io.InputStream input)
686            throws java.io.IOException {
687          return newBuilder().mergeFrom(input).buildParsed();
688        }
689        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto parseFrom(
690            java.io.InputStream input,
691            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
692            throws java.io.IOException {
693          return newBuilder().mergeFrom(input, extensionRegistry)
694                   .buildParsed();
695        }
696        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto parseDelimitedFrom(java.io.InputStream input)
697            throws java.io.IOException {
698          Builder builder = newBuilder();
699          if (builder.mergeDelimitedFrom(input)) {
700            return builder.buildParsed();
701          } else {
702            return null;
703          }
704        }
705        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto parseDelimitedFrom(
706            java.io.InputStream input,
707            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
708            throws java.io.IOException {
709          Builder builder = newBuilder();
710          if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
711            return builder.buildParsed();
712          } else {
713            return null;
714          }
715        }
716        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto parseFrom(
717            com.google.protobuf.CodedInputStream input)
718            throws java.io.IOException {
719          return newBuilder().mergeFrom(input).buildParsed();
720        }
721        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto parseFrom(
722            com.google.protobuf.CodedInputStream input,
723            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
724            throws java.io.IOException {
725          return newBuilder().mergeFrom(input, extensionRegistry)
726                   .buildParsed();
727        }
728        
729        public static Builder newBuilder() { return Builder.create(); }
730        public Builder newBuilderForType() { return newBuilder(); }
731        public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto prototype) {
732          return newBuilder().mergeFrom(prototype);
733        }
734        public Builder toBuilder() { return newBuilder(this); }
735        
736        @java.lang.Override
737        protected Builder newBuilderForType(
738            com.google.protobuf.GeneratedMessage.BuilderParent parent) {
739          Builder builder = new Builder(parent);
740          return builder;
741        }
742        public static final class Builder extends
743            com.google.protobuf.GeneratedMessage.Builder<Builder>
744           implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder {
745          public static final com.google.protobuf.Descriptors.Descriptor
746              getDescriptor() {
747            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_RequestInfoProto_descriptor;
748          }
749          
750          protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
751              internalGetFieldAccessorTable() {
752            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_RequestInfoProto_fieldAccessorTable;
753          }
754          
755          // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.newBuilder()
756          private Builder() {
757            maybeForceBuilderInitialization();
758          }
759          
760          private Builder(BuilderParent parent) {
761            super(parent);
762            maybeForceBuilderInitialization();
763          }
764          private void maybeForceBuilderInitialization() {
765            if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
766              getJournalIdFieldBuilder();
767            }
768          }
769          private static Builder create() {
770            return new Builder();
771          }
772          
773          public Builder clear() {
774            super.clear();
775            if (journalIdBuilder_ == null) {
776              journalId_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
777            } else {
778              journalIdBuilder_.clear();
779            }
780            bitField0_ = (bitField0_ & ~0x00000001);
781            epoch_ = 0L;
782            bitField0_ = (bitField0_ & ~0x00000002);
783            ipcSerialNumber_ = 0L;
784            bitField0_ = (bitField0_ & ~0x00000004);
785            committedTxId_ = 0L;
786            bitField0_ = (bitField0_ & ~0x00000008);
787            return this;
788          }
789          
790          public Builder clone() {
791            return create().mergeFrom(buildPartial());
792          }
793          
794          public com.google.protobuf.Descriptors.Descriptor
795              getDescriptorForType() {
796            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDescriptor();
797          }
798          
799          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getDefaultInstanceForType() {
800            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
801          }
802          
803          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto build() {
804            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto result = buildPartial();
805            if (!result.isInitialized()) {
806              throw newUninitializedMessageException(result);
807            }
808            return result;
809          }
810          
811          private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto buildParsed()
812              throws com.google.protobuf.InvalidProtocolBufferException {
813            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto result = buildPartial();
814            if (!result.isInitialized()) {
815              throw newUninitializedMessageException(
816                result).asInvalidProtocolBufferException();
817            }
818            return result;
819          }
820          
821          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto buildPartial() {
822            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto(this);
823            int from_bitField0_ = bitField0_;
824            int to_bitField0_ = 0;
825            if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
826              to_bitField0_ |= 0x00000001;
827            }
828            if (journalIdBuilder_ == null) {
829              result.journalId_ = journalId_;
830            } else {
831              result.journalId_ = journalIdBuilder_.build();
832            }
833            if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
834              to_bitField0_ |= 0x00000002;
835            }
836            result.epoch_ = epoch_;
837            if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
838              to_bitField0_ |= 0x00000004;
839            }
840            result.ipcSerialNumber_ = ipcSerialNumber_;
841            if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
842              to_bitField0_ |= 0x00000008;
843            }
844            result.committedTxId_ = committedTxId_;
845            result.bitField0_ = to_bitField0_;
846            onBuilt();
847            return result;
848          }
849          
850          public Builder mergeFrom(com.google.protobuf.Message other) {
851            if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto) {
852              return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto)other);
853            } else {
854              super.mergeFrom(other);
855              return this;
856            }
857          }
858          
859          public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto other) {
860            if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance()) return this;
861            if (other.hasJournalId()) {
862              mergeJournalId(other.getJournalId());
863            }
864            if (other.hasEpoch()) {
865              setEpoch(other.getEpoch());
866            }
867            if (other.hasIpcSerialNumber()) {
868              setIpcSerialNumber(other.getIpcSerialNumber());
869            }
870            if (other.hasCommittedTxId()) {
871              setCommittedTxId(other.getCommittedTxId());
872            }
873            this.mergeUnknownFields(other.getUnknownFields());
874            return this;
875          }
876          
877          public final boolean isInitialized() {
878            if (!hasJournalId()) {
879              
880              return false;
881            }
882            if (!hasEpoch()) {
883              
884              return false;
885            }
886            if (!hasIpcSerialNumber()) {
887              
888              return false;
889            }
890            if (!getJournalId().isInitialized()) {
891              
892              return false;
893            }
894            return true;
895          }
896          
897          public Builder mergeFrom(
898              com.google.protobuf.CodedInputStream input,
899              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
900              throws java.io.IOException {
901            com.google.protobuf.UnknownFieldSet.Builder unknownFields =
902              com.google.protobuf.UnknownFieldSet.newBuilder(
903                this.getUnknownFields());
904            while (true) {
905              int tag = input.readTag();
906              switch (tag) {
907                case 0:
908                  this.setUnknownFields(unknownFields.build());
909                  onChanged();
910                  return this;
911                default: {
912                  if (!parseUnknownField(input, unknownFields,
913                                         extensionRegistry, tag)) {
914                    this.setUnknownFields(unknownFields.build());
915                    onChanged();
916                    return this;
917                  }
918                  break;
919                }
920                case 10: {
921                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder subBuilder = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.newBuilder();
922                  if (hasJournalId()) {
923                    subBuilder.mergeFrom(getJournalId());
924                  }
925                  input.readMessage(subBuilder, extensionRegistry);
926                  setJournalId(subBuilder.buildPartial());
927                  break;
928                }
929                case 16: {
930                  bitField0_ |= 0x00000002;
931                  epoch_ = input.readUInt64();
932                  break;
933                }
934                case 24: {
935                  bitField0_ |= 0x00000004;
936                  ipcSerialNumber_ = input.readUInt64();
937                  break;
938                }
939                case 32: {
940                  bitField0_ |= 0x00000008;
941                  committedTxId_ = input.readUInt64();
942                  break;
943                }
944              }
945            }
946          }
947          
948          private int bitField0_;
949          
950          // required .hadoop.hdfs.JournalIdProto journalId = 1;
951          private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto journalId_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
952          private com.google.protobuf.SingleFieldBuilder<
953              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder> journalIdBuilder_;
954          public boolean hasJournalId() {
955            return ((bitField0_ & 0x00000001) == 0x00000001);
956          }
957          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJournalId() {
958            if (journalIdBuilder_ == null) {
959              return journalId_;
960            } else {
961              return journalIdBuilder_.getMessage();
962            }
963          }
964          public Builder setJournalId(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto value) {
965            if (journalIdBuilder_ == null) {
966              if (value == null) {
967                throw new NullPointerException();
968              }
969              journalId_ = value;
970              onChanged();
971            } else {
972              journalIdBuilder_.setMessage(value);
973            }
974            bitField0_ |= 0x00000001;
975            return this;
976          }
977          public Builder setJournalId(
978              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder builderForValue) {
979            if (journalIdBuilder_ == null) {
980              journalId_ = builderForValue.build();
981              onChanged();
982            } else {
983              journalIdBuilder_.setMessage(builderForValue.build());
984            }
985            bitField0_ |= 0x00000001;
986            return this;
987          }
988          public Builder mergeJournalId(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto value) {
989            if (journalIdBuilder_ == null) {
990              if (((bitField0_ & 0x00000001) == 0x00000001) &&
991                  journalId_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance()) {
992                journalId_ =
993                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.newBuilder(journalId_).mergeFrom(value).buildPartial();
994              } else {
995                journalId_ = value;
996              }
997              onChanged();
998            } else {
999              journalIdBuilder_.mergeFrom(value);
1000            }
1001            bitField0_ |= 0x00000001;
1002            return this;
1003          }
1004          public Builder clearJournalId() {
1005            if (journalIdBuilder_ == null) {
1006              journalId_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
1007              onChanged();
1008            } else {
1009              journalIdBuilder_.clear();
1010            }
1011            bitField0_ = (bitField0_ & ~0x00000001);
1012            return this;
1013          }
1014          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder getJournalIdBuilder() {
1015            bitField0_ |= 0x00000001;
1016            onChanged();
1017            return getJournalIdFieldBuilder().getBuilder();
1018          }
1019          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJournalIdOrBuilder() {
1020            if (journalIdBuilder_ != null) {
1021              return journalIdBuilder_.getMessageOrBuilder();
1022            } else {
1023              return journalId_;
1024            }
1025          }
1026          private com.google.protobuf.SingleFieldBuilder<
1027              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder> 
1028              getJournalIdFieldBuilder() {
1029            if (journalIdBuilder_ == null) {
1030              journalIdBuilder_ = new com.google.protobuf.SingleFieldBuilder<
1031                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder>(
1032                      journalId_,
1033                      getParentForChildren(),
1034                      isClean());
1035              journalId_ = null;
1036            }
1037            return journalIdBuilder_;
1038          }
1039          
1040          // required uint64 epoch = 2;
1041          private long epoch_ ;
1042          public boolean hasEpoch() {
1043            return ((bitField0_ & 0x00000002) == 0x00000002);
1044          }
1045          public long getEpoch() {
1046            return epoch_;
1047          }
1048          public Builder setEpoch(long value) {
1049            bitField0_ |= 0x00000002;
1050            epoch_ = value;
1051            onChanged();
1052            return this;
1053          }
1054          public Builder clearEpoch() {
1055            bitField0_ = (bitField0_ & ~0x00000002);
1056            epoch_ = 0L;
1057            onChanged();
1058            return this;
1059          }
1060          
1061          // required uint64 ipcSerialNumber = 3;
1062          private long ipcSerialNumber_ ;
1063          public boolean hasIpcSerialNumber() {
1064            return ((bitField0_ & 0x00000004) == 0x00000004);
1065          }
1066          public long getIpcSerialNumber() {
1067            return ipcSerialNumber_;
1068          }
1069          public Builder setIpcSerialNumber(long value) {
1070            bitField0_ |= 0x00000004;
1071            ipcSerialNumber_ = value;
1072            onChanged();
1073            return this;
1074          }
1075          public Builder clearIpcSerialNumber() {
1076            bitField0_ = (bitField0_ & ~0x00000004);
1077            ipcSerialNumber_ = 0L;
1078            onChanged();
1079            return this;
1080          }
1081          
1082          // optional uint64 committedTxId = 4;
1083          private long committedTxId_ ;
1084          public boolean hasCommittedTxId() {
1085            return ((bitField0_ & 0x00000008) == 0x00000008);
1086          }
1087          public long getCommittedTxId() {
1088            return committedTxId_;
1089          }
1090          public Builder setCommittedTxId(long value) {
1091            bitField0_ |= 0x00000008;
1092            committedTxId_ = value;
1093            onChanged();
1094            return this;
1095          }
1096          public Builder clearCommittedTxId() {
1097            bitField0_ = (bitField0_ & ~0x00000008);
1098            committedTxId_ = 0L;
1099            onChanged();
1100            return this;
1101          }
1102          
1103          // @@protoc_insertion_point(builder_scope:hadoop.hdfs.RequestInfoProto)
1104        }
1105        
1106        static {
1107          defaultInstance = new RequestInfoProto(true);
1108          defaultInstance.initFields();
1109        }
1110        
1111        // @@protoc_insertion_point(class_scope:hadoop.hdfs.RequestInfoProto)
1112      }
1113      
1114      public interface SegmentStateProtoOrBuilder
1115          extends com.google.protobuf.MessageOrBuilder {
1116        
1117        // required uint64 startTxId = 1;
1118        boolean hasStartTxId();
1119        long getStartTxId();
1120        
1121        // required uint64 endTxId = 2;
1122        boolean hasEndTxId();
1123        long getEndTxId();
1124        
1125        // required bool isInProgress = 3;
1126        boolean hasIsInProgress();
1127        boolean getIsInProgress();
1128      }
1129      public static final class SegmentStateProto extends
1130          com.google.protobuf.GeneratedMessage
1131          implements SegmentStateProtoOrBuilder {
1132        // Use SegmentStateProto.newBuilder() to construct.
1133        private SegmentStateProto(Builder builder) {
1134          super(builder);
1135        }
1136        private SegmentStateProto(boolean noInit) {}
1137        
1138        private static final SegmentStateProto defaultInstance;
1139        public static SegmentStateProto getDefaultInstance() {
1140          return defaultInstance;
1141        }
1142        
1143        public SegmentStateProto getDefaultInstanceForType() {
1144          return defaultInstance;
1145        }
1146        
1147        public static final com.google.protobuf.Descriptors.Descriptor
1148            getDescriptor() {
1149          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_SegmentStateProto_descriptor;
1150        }
1151        
1152        protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
1153            internalGetFieldAccessorTable() {
1154          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_SegmentStateProto_fieldAccessorTable;
1155        }
1156        
1157        private int bitField0_;
1158        // required uint64 startTxId = 1;
1159        public static final int STARTTXID_FIELD_NUMBER = 1;
1160        private long startTxId_;
1161        public boolean hasStartTxId() {
1162          return ((bitField0_ & 0x00000001) == 0x00000001);
1163        }
1164        public long getStartTxId() {
1165          return startTxId_;
1166        }
1167        
1168        // required uint64 endTxId = 2;
1169        public static final int ENDTXID_FIELD_NUMBER = 2;
1170        private long endTxId_;
1171        public boolean hasEndTxId() {
1172          return ((bitField0_ & 0x00000002) == 0x00000002);
1173        }
1174        public long getEndTxId() {
1175          return endTxId_;
1176        }
1177        
1178        // required bool isInProgress = 3;
1179        public static final int ISINPROGRESS_FIELD_NUMBER = 3;
1180        private boolean isInProgress_;
1181        public boolean hasIsInProgress() {
1182          return ((bitField0_ & 0x00000004) == 0x00000004);
1183        }
1184        public boolean getIsInProgress() {
1185          return isInProgress_;
1186        }
1187        
1188        private void initFields() {
1189          startTxId_ = 0L;
1190          endTxId_ = 0L;
1191          isInProgress_ = false;
1192        }
1193        private byte memoizedIsInitialized = -1;
1194        public final boolean isInitialized() {
1195          byte isInitialized = memoizedIsInitialized;
1196          if (isInitialized != -1) return isInitialized == 1;
1197          
1198          if (!hasStartTxId()) {
1199            memoizedIsInitialized = 0;
1200            return false;
1201          }
1202          if (!hasEndTxId()) {
1203            memoizedIsInitialized = 0;
1204            return false;
1205          }
1206          if (!hasIsInProgress()) {
1207            memoizedIsInitialized = 0;
1208            return false;
1209          }
1210          memoizedIsInitialized = 1;
1211          return true;
1212        }
1213        
1214        public void writeTo(com.google.protobuf.CodedOutputStream output)
1215                            throws java.io.IOException {
1216          getSerializedSize();
1217          if (((bitField0_ & 0x00000001) == 0x00000001)) {
1218            output.writeUInt64(1, startTxId_);
1219          }
1220          if (((bitField0_ & 0x00000002) == 0x00000002)) {
1221            output.writeUInt64(2, endTxId_);
1222          }
1223          if (((bitField0_ & 0x00000004) == 0x00000004)) {
1224            output.writeBool(3, isInProgress_);
1225          }
1226          getUnknownFields().writeTo(output);
1227        }
1228        
1229        private int memoizedSerializedSize = -1;
1230        public int getSerializedSize() {
1231          int size = memoizedSerializedSize;
1232          if (size != -1) return size;
1233        
1234          size = 0;
1235          if (((bitField0_ & 0x00000001) == 0x00000001)) {
1236            size += com.google.protobuf.CodedOutputStream
1237              .computeUInt64Size(1, startTxId_);
1238          }
1239          if (((bitField0_ & 0x00000002) == 0x00000002)) {
1240            size += com.google.protobuf.CodedOutputStream
1241              .computeUInt64Size(2, endTxId_);
1242          }
1243          if (((bitField0_ & 0x00000004) == 0x00000004)) {
1244            size += com.google.protobuf.CodedOutputStream
1245              .computeBoolSize(3, isInProgress_);
1246          }
1247          size += getUnknownFields().getSerializedSize();
1248          memoizedSerializedSize = size;
1249          return size;
1250        }
1251        
1252        private static final long serialVersionUID = 0L;
1253        @java.lang.Override
1254        protected java.lang.Object writeReplace()
1255            throws java.io.ObjectStreamException {
1256          return super.writeReplace();
1257        }
1258        
1259        @java.lang.Override
1260        public boolean equals(final java.lang.Object obj) {
1261          if (obj == this) {
1262           return true;
1263          }
1264          if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto)) {
1265            return super.equals(obj);
1266          }
1267          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto) obj;
1268          
1269          boolean result = true;
1270          result = result && (hasStartTxId() == other.hasStartTxId());
1271          if (hasStartTxId()) {
1272            result = result && (getStartTxId()
1273                == other.getStartTxId());
1274          }
1275          result = result && (hasEndTxId() == other.hasEndTxId());
1276          if (hasEndTxId()) {
1277            result = result && (getEndTxId()
1278                == other.getEndTxId());
1279          }
1280          result = result && (hasIsInProgress() == other.hasIsInProgress());
1281          if (hasIsInProgress()) {
1282            result = result && (getIsInProgress()
1283                == other.getIsInProgress());
1284          }
1285          result = result &&
1286              getUnknownFields().equals(other.getUnknownFields());
1287          return result;
1288        }
1289        
1290        @java.lang.Override
1291        public int hashCode() {
1292          int hash = 41;
1293          hash = (19 * hash) + getDescriptorForType().hashCode();
1294          if (hasStartTxId()) {
1295            hash = (37 * hash) + STARTTXID_FIELD_NUMBER;
1296            hash = (53 * hash) + hashLong(getStartTxId());
1297          }
1298          if (hasEndTxId()) {
1299            hash = (37 * hash) + ENDTXID_FIELD_NUMBER;
1300            hash = (53 * hash) + hashLong(getEndTxId());
1301          }
1302          if (hasIsInProgress()) {
1303            hash = (37 * hash) + ISINPROGRESS_FIELD_NUMBER;
1304            hash = (53 * hash) + hashBoolean(getIsInProgress());
1305          }
1306          hash = (29 * hash) + getUnknownFields().hashCode();
1307          return hash;
1308        }
1309        
1310        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto parseFrom(
1311            com.google.protobuf.ByteString data)
1312            throws com.google.protobuf.InvalidProtocolBufferException {
1313          return newBuilder().mergeFrom(data).buildParsed();
1314        }
1315        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto parseFrom(
1316            com.google.protobuf.ByteString data,
1317            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
1318            throws com.google.protobuf.InvalidProtocolBufferException {
1319          return newBuilder().mergeFrom(data, extensionRegistry)
1320                   .buildParsed();
1321        }
1322        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto parseFrom(byte[] data)
1323            throws com.google.protobuf.InvalidProtocolBufferException {
1324          return newBuilder().mergeFrom(data).buildParsed();
1325        }
1326        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto parseFrom(
1327            byte[] data,
1328            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
1329            throws com.google.protobuf.InvalidProtocolBufferException {
1330          return newBuilder().mergeFrom(data, extensionRegistry)
1331                   .buildParsed();
1332        }
1333        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto parseFrom(java.io.InputStream input)
1334            throws java.io.IOException {
1335          return newBuilder().mergeFrom(input).buildParsed();
1336        }
1337        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto parseFrom(
1338            java.io.InputStream input,
1339            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
1340            throws java.io.IOException {
1341          return newBuilder().mergeFrom(input, extensionRegistry)
1342                   .buildParsed();
1343        }
1344        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto parseDelimitedFrom(java.io.InputStream input)
1345            throws java.io.IOException {
1346          Builder builder = newBuilder();
1347          if (builder.mergeDelimitedFrom(input)) {
1348            return builder.buildParsed();
1349          } else {
1350            return null;
1351          }
1352        }
1353        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto parseDelimitedFrom(
1354            java.io.InputStream input,
1355            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
1356            throws java.io.IOException {
1357          Builder builder = newBuilder();
1358          if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
1359            return builder.buildParsed();
1360          } else {
1361            return null;
1362          }
1363        }
1364        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto parseFrom(
1365            com.google.protobuf.CodedInputStream input)
1366            throws java.io.IOException {
1367          return newBuilder().mergeFrom(input).buildParsed();
1368        }
1369        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto parseFrom(
1370            com.google.protobuf.CodedInputStream input,
1371            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
1372            throws java.io.IOException {
1373          return newBuilder().mergeFrom(input, extensionRegistry)
1374                   .buildParsed();
1375        }
1376        
1377        public static Builder newBuilder() { return Builder.create(); }
1378        public Builder newBuilderForType() { return newBuilder(); }
1379        public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto prototype) {
1380          return newBuilder().mergeFrom(prototype);
1381        }
1382        public Builder toBuilder() { return newBuilder(this); }
1383        
1384        @java.lang.Override
1385        protected Builder newBuilderForType(
1386            com.google.protobuf.GeneratedMessage.BuilderParent parent) {
1387          Builder builder = new Builder(parent);
1388          return builder;
1389        }
1390        public static final class Builder extends
1391            com.google.protobuf.GeneratedMessage.Builder<Builder>
1392           implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder {
1393          public static final com.google.protobuf.Descriptors.Descriptor
1394              getDescriptor() {
1395            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_SegmentStateProto_descriptor;
1396          }
1397          
1398          protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
1399              internalGetFieldAccessorTable() {
1400            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_SegmentStateProto_fieldAccessorTable;
1401          }
1402          
1403          // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.newBuilder()
1404          private Builder() {
1405            maybeForceBuilderInitialization();
1406          }
1407          
1408          private Builder(BuilderParent parent) {
1409            super(parent);
1410            maybeForceBuilderInitialization();
1411          }
1412          private void maybeForceBuilderInitialization() {
1413            if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
1414            }
1415          }
1416          private static Builder create() {
1417            return new Builder();
1418          }
1419          
1420          public Builder clear() {
1421            super.clear();
1422            startTxId_ = 0L;
1423            bitField0_ = (bitField0_ & ~0x00000001);
1424            endTxId_ = 0L;
1425            bitField0_ = (bitField0_ & ~0x00000002);
1426            isInProgress_ = false;
1427            bitField0_ = (bitField0_ & ~0x00000004);
1428            return this;
1429          }
1430          
1431          public Builder clone() {
1432            return create().mergeFrom(buildPartial());
1433          }
1434          
1435          public com.google.protobuf.Descriptors.Descriptor
1436              getDescriptorForType() {
1437            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDescriptor();
1438          }
1439          
1440          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto getDefaultInstanceForType() {
1441            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance();
1442          }
1443          
1444          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto build() {
1445            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto result = buildPartial();
1446            if (!result.isInitialized()) {
1447              throw newUninitializedMessageException(result);
1448            }
1449            return result;
1450          }
1451          
1452          private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto buildParsed()
1453              throws com.google.protobuf.InvalidProtocolBufferException {
1454            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto result = buildPartial();
1455            if (!result.isInitialized()) {
1456              throw newUninitializedMessageException(
1457                result).asInvalidProtocolBufferException();
1458            }
1459            return result;
1460          }
1461          
1462          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto buildPartial() {
1463            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto(this);
1464            int from_bitField0_ = bitField0_;
1465            int to_bitField0_ = 0;
1466            if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
1467              to_bitField0_ |= 0x00000001;
1468            }
1469            result.startTxId_ = startTxId_;
1470            if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
1471              to_bitField0_ |= 0x00000002;
1472            }
1473            result.endTxId_ = endTxId_;
1474            if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
1475              to_bitField0_ |= 0x00000004;
1476            }
1477            result.isInProgress_ = isInProgress_;
1478            result.bitField0_ = to_bitField0_;
1479            onBuilt();
1480            return result;
1481          }
1482          
1483          public Builder mergeFrom(com.google.protobuf.Message other) {
1484            if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto) {
1485              return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto)other);
1486            } else {
1487              super.mergeFrom(other);
1488              return this;
1489            }
1490          }
1491          
1492          public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto other) {
1493            if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance()) return this;
1494            if (other.hasStartTxId()) {
1495              setStartTxId(other.getStartTxId());
1496            }
1497            if (other.hasEndTxId()) {
1498              setEndTxId(other.getEndTxId());
1499            }
1500            if (other.hasIsInProgress()) {
1501              setIsInProgress(other.getIsInProgress());
1502            }
1503            this.mergeUnknownFields(other.getUnknownFields());
1504            return this;
1505          }
1506          
1507          public final boolean isInitialized() {
1508            if (!hasStartTxId()) {
1509              
1510              return false;
1511            }
1512            if (!hasEndTxId()) {
1513              
1514              return false;
1515            }
1516            if (!hasIsInProgress()) {
1517              
1518              return false;
1519            }
1520            return true;
1521          }
1522          
1523          public Builder mergeFrom(
1524              com.google.protobuf.CodedInputStream input,
1525              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
1526              throws java.io.IOException {
1527            com.google.protobuf.UnknownFieldSet.Builder unknownFields =
1528              com.google.protobuf.UnknownFieldSet.newBuilder(
1529                this.getUnknownFields());
1530            while (true) {
1531              int tag = input.readTag();
1532              switch (tag) {
1533                case 0:
1534                  this.setUnknownFields(unknownFields.build());
1535                  onChanged();
1536                  return this;
1537                default: {
1538                  if (!parseUnknownField(input, unknownFields,
1539                                         extensionRegistry, tag)) {
1540                    this.setUnknownFields(unknownFields.build());
1541                    onChanged();
1542                    return this;
1543                  }
1544                  break;
1545                }
1546                case 8: {
1547                  bitField0_ |= 0x00000001;
1548                  startTxId_ = input.readUInt64();
1549                  break;
1550                }
1551                case 16: {
1552                  bitField0_ |= 0x00000002;
1553                  endTxId_ = input.readUInt64();
1554                  break;
1555                }
1556                case 24: {
1557                  bitField0_ |= 0x00000004;
1558                  isInProgress_ = input.readBool();
1559                  break;
1560                }
1561              }
1562            }
1563          }
1564          
1565          private int bitField0_;
1566          
1567          // required uint64 startTxId = 1;
1568          private long startTxId_ ;
1569          public boolean hasStartTxId() {
1570            return ((bitField0_ & 0x00000001) == 0x00000001);
1571          }
1572          public long getStartTxId() {
1573            return startTxId_;
1574          }
1575          public Builder setStartTxId(long value) {
1576            bitField0_ |= 0x00000001;
1577            startTxId_ = value;
1578            onChanged();
1579            return this;
1580          }
1581          public Builder clearStartTxId() {
1582            bitField0_ = (bitField0_ & ~0x00000001);
1583            startTxId_ = 0L;
1584            onChanged();
1585            return this;
1586          }
1587          
1588          // required uint64 endTxId = 2;
1589          private long endTxId_ ;
1590          public boolean hasEndTxId() {
1591            return ((bitField0_ & 0x00000002) == 0x00000002);
1592          }
1593          public long getEndTxId() {
1594            return endTxId_;
1595          }
1596          public Builder setEndTxId(long value) {
1597            bitField0_ |= 0x00000002;
1598            endTxId_ = value;
1599            onChanged();
1600            return this;
1601          }
1602          public Builder clearEndTxId() {
1603            bitField0_ = (bitField0_ & ~0x00000002);
1604            endTxId_ = 0L;
1605            onChanged();
1606            return this;
1607          }
1608          
1609          // required bool isInProgress = 3;
1610          private boolean isInProgress_ ;
1611          public boolean hasIsInProgress() {
1612            return ((bitField0_ & 0x00000004) == 0x00000004);
1613          }
1614          public boolean getIsInProgress() {
1615            return isInProgress_;
1616          }
1617          public Builder setIsInProgress(boolean value) {
1618            bitField0_ |= 0x00000004;
1619            isInProgress_ = value;
1620            onChanged();
1621            return this;
1622          }
1623          public Builder clearIsInProgress() {
1624            bitField0_ = (bitField0_ & ~0x00000004);
1625            isInProgress_ = false;
1626            onChanged();
1627            return this;
1628          }
1629          
1630          // @@protoc_insertion_point(builder_scope:hadoop.hdfs.SegmentStateProto)
1631        }
1632        
1633        static {
1634          defaultInstance = new SegmentStateProto(true);
1635          defaultInstance.initFields();
1636        }
1637        
1638        // @@protoc_insertion_point(class_scope:hadoop.hdfs.SegmentStateProto)
1639      }
1640      
1641      public interface PersistedRecoveryPaxosDataOrBuilder
1642          extends com.google.protobuf.MessageOrBuilder {
1643        
1644        // required .hadoop.hdfs.SegmentStateProto segmentState = 1;
1645        boolean hasSegmentState();
1646        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto getSegmentState();
1647        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder getSegmentStateOrBuilder();
1648        
1649        // required uint64 acceptedInEpoch = 2;
1650        boolean hasAcceptedInEpoch();
1651        long getAcceptedInEpoch();
1652      }
1653      public static final class PersistedRecoveryPaxosData extends
1654          com.google.protobuf.GeneratedMessage
1655          implements PersistedRecoveryPaxosDataOrBuilder {
1656        // Use PersistedRecoveryPaxosData.newBuilder() to construct.
1657        private PersistedRecoveryPaxosData(Builder builder) {
1658          super(builder);
1659        }
1660        private PersistedRecoveryPaxosData(boolean noInit) {}
1661        
1662        private static final PersistedRecoveryPaxosData defaultInstance;
1663        public static PersistedRecoveryPaxosData getDefaultInstance() {
1664          return defaultInstance;
1665        }
1666        
1667        public PersistedRecoveryPaxosData getDefaultInstanceForType() {
1668          return defaultInstance;
1669        }
1670        
1671        public static final com.google.protobuf.Descriptors.Descriptor
1672            getDescriptor() {
1673          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_PersistedRecoveryPaxosData_descriptor;
1674        }
1675        
1676        protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
1677            internalGetFieldAccessorTable() {
1678          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_PersistedRecoveryPaxosData_fieldAccessorTable;
1679        }
1680        
1681        private int bitField0_;
1682        // required .hadoop.hdfs.SegmentStateProto segmentState = 1;
1683        public static final int SEGMENTSTATE_FIELD_NUMBER = 1;
1684        private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto segmentState_;
1685        public boolean hasSegmentState() {
1686          return ((bitField0_ & 0x00000001) == 0x00000001);
1687        }
1688        public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto getSegmentState() {
1689          return segmentState_;
1690        }
1691        public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder getSegmentStateOrBuilder() {
1692          return segmentState_;
1693        }
1694        
1695        // required uint64 acceptedInEpoch = 2;
1696        public static final int ACCEPTEDINEPOCH_FIELD_NUMBER = 2;
1697        private long acceptedInEpoch_;
1698        public boolean hasAcceptedInEpoch() {
1699          return ((bitField0_ & 0x00000002) == 0x00000002);
1700        }
1701        public long getAcceptedInEpoch() {
1702          return acceptedInEpoch_;
1703        }
1704        
1705        private void initFields() {
1706          segmentState_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance();
1707          acceptedInEpoch_ = 0L;
1708        }
1709        private byte memoizedIsInitialized = -1;
1710        public final boolean isInitialized() {
1711          byte isInitialized = memoizedIsInitialized;
1712          if (isInitialized != -1) return isInitialized == 1;
1713          
1714          if (!hasSegmentState()) {
1715            memoizedIsInitialized = 0;
1716            return false;
1717          }
1718          if (!hasAcceptedInEpoch()) {
1719            memoizedIsInitialized = 0;
1720            return false;
1721          }
1722          if (!getSegmentState().isInitialized()) {
1723            memoizedIsInitialized = 0;
1724            return false;
1725          }
1726          memoizedIsInitialized = 1;
1727          return true;
1728        }
1729        
1730        public void writeTo(com.google.protobuf.CodedOutputStream output)
1731                            throws java.io.IOException {
1732          getSerializedSize();
1733          if (((bitField0_ & 0x00000001) == 0x00000001)) {
1734            output.writeMessage(1, segmentState_);
1735          }
1736          if (((bitField0_ & 0x00000002) == 0x00000002)) {
1737            output.writeUInt64(2, acceptedInEpoch_);
1738          }
1739          getUnknownFields().writeTo(output);
1740        }
1741        
1742        private int memoizedSerializedSize = -1;
1743        public int getSerializedSize() {
1744          int size = memoizedSerializedSize;
1745          if (size != -1) return size;
1746        
1747          size = 0;
1748          if (((bitField0_ & 0x00000001) == 0x00000001)) {
1749            size += com.google.protobuf.CodedOutputStream
1750              .computeMessageSize(1, segmentState_);
1751          }
1752          if (((bitField0_ & 0x00000002) == 0x00000002)) {
1753            size += com.google.protobuf.CodedOutputStream
1754              .computeUInt64Size(2, acceptedInEpoch_);
1755          }
1756          size += getUnknownFields().getSerializedSize();
1757          memoizedSerializedSize = size;
1758          return size;
1759        }
1760        
1761        private static final long serialVersionUID = 0L;
1762        @java.lang.Override
1763        protected java.lang.Object writeReplace()
1764            throws java.io.ObjectStreamException {
1765          return super.writeReplace();
1766        }
1767        
1768        @java.lang.Override
1769        public boolean equals(final java.lang.Object obj) {
1770          if (obj == this) {
1771           return true;
1772          }
1773          if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData)) {
1774            return super.equals(obj);
1775          }
1776          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData) obj;
1777          
1778          boolean result = true;
1779          result = result && (hasSegmentState() == other.hasSegmentState());
1780          if (hasSegmentState()) {
1781            result = result && getSegmentState()
1782                .equals(other.getSegmentState());
1783          }
1784          result = result && (hasAcceptedInEpoch() == other.hasAcceptedInEpoch());
1785          if (hasAcceptedInEpoch()) {
1786            result = result && (getAcceptedInEpoch()
1787                == other.getAcceptedInEpoch());
1788          }
1789          result = result &&
1790              getUnknownFields().equals(other.getUnknownFields());
1791          return result;
1792        }
1793        
1794        @java.lang.Override
1795        public int hashCode() {
1796          int hash = 41;
1797          hash = (19 * hash) + getDescriptorForType().hashCode();
1798          if (hasSegmentState()) {
1799            hash = (37 * hash) + SEGMENTSTATE_FIELD_NUMBER;
1800            hash = (53 * hash) + getSegmentState().hashCode();
1801          }
1802          if (hasAcceptedInEpoch()) {
1803            hash = (37 * hash) + ACCEPTEDINEPOCH_FIELD_NUMBER;
1804            hash = (53 * hash) + hashLong(getAcceptedInEpoch());
1805          }
1806          hash = (29 * hash) + getUnknownFields().hashCode();
1807          return hash;
1808        }
1809        
1810        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData parseFrom(
1811            com.google.protobuf.ByteString data)
1812            throws com.google.protobuf.InvalidProtocolBufferException {
1813          return newBuilder().mergeFrom(data).buildParsed();
1814        }
1815        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData parseFrom(
1816            com.google.protobuf.ByteString data,
1817            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
1818            throws com.google.protobuf.InvalidProtocolBufferException {
1819          return newBuilder().mergeFrom(data, extensionRegistry)
1820                   .buildParsed();
1821        }
1822        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData parseFrom(byte[] data)
1823            throws com.google.protobuf.InvalidProtocolBufferException {
1824          return newBuilder().mergeFrom(data).buildParsed();
1825        }
1826        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData parseFrom(
1827            byte[] data,
1828            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
1829            throws com.google.protobuf.InvalidProtocolBufferException {
1830          return newBuilder().mergeFrom(data, extensionRegistry)
1831                   .buildParsed();
1832        }
1833        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData parseFrom(java.io.InputStream input)
1834            throws java.io.IOException {
1835          return newBuilder().mergeFrom(input).buildParsed();
1836        }
1837        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData parseFrom(
1838            java.io.InputStream input,
1839            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
1840            throws java.io.IOException {
1841          return newBuilder().mergeFrom(input, extensionRegistry)
1842                   .buildParsed();
1843        }
1844        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData parseDelimitedFrom(java.io.InputStream input)
1845            throws java.io.IOException {
1846          Builder builder = newBuilder();
1847          if (builder.mergeDelimitedFrom(input)) {
1848            return builder.buildParsed();
1849          } else {
1850            return null;
1851          }
1852        }
1853        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData parseDelimitedFrom(
1854            java.io.InputStream input,
1855            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
1856            throws java.io.IOException {
1857          Builder builder = newBuilder();
1858          if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
1859            return builder.buildParsed();
1860          } else {
1861            return null;
1862          }
1863        }
1864        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData parseFrom(
1865            com.google.protobuf.CodedInputStream input)
1866            throws java.io.IOException {
1867          return newBuilder().mergeFrom(input).buildParsed();
1868        }
1869        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData parseFrom(
1870            com.google.protobuf.CodedInputStream input,
1871            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
1872            throws java.io.IOException {
1873          return newBuilder().mergeFrom(input, extensionRegistry)
1874                   .buildParsed();
1875        }
1876        
1877        public static Builder newBuilder() { return Builder.create(); }
1878        public Builder newBuilderForType() { return newBuilder(); }
1879        public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData prototype) {
1880          return newBuilder().mergeFrom(prototype);
1881        }
1882        public Builder toBuilder() { return newBuilder(this); }
1883        
1884        @java.lang.Override
1885        protected Builder newBuilderForType(
1886            com.google.protobuf.GeneratedMessage.BuilderParent parent) {
1887          Builder builder = new Builder(parent);
1888          return builder;
1889        }
1890        public static final class Builder extends
1891            com.google.protobuf.GeneratedMessage.Builder<Builder>
1892           implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosDataOrBuilder {
1893          public static final com.google.protobuf.Descriptors.Descriptor
1894              getDescriptor() {
1895            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_PersistedRecoveryPaxosData_descriptor;
1896          }
1897          
1898          protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
1899              internalGetFieldAccessorTable() {
1900            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_PersistedRecoveryPaxosData_fieldAccessorTable;
1901          }
1902          
1903          // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData.newBuilder()
1904          private Builder() {
1905            maybeForceBuilderInitialization();
1906          }
1907          
1908          private Builder(BuilderParent parent) {
1909            super(parent);
1910            maybeForceBuilderInitialization();
1911          }
1912          private void maybeForceBuilderInitialization() {
1913            if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
1914              getSegmentStateFieldBuilder();
1915            }
1916          }
1917          private static Builder create() {
1918            return new Builder();
1919          }
1920          
1921          public Builder clear() {
1922            super.clear();
1923            if (segmentStateBuilder_ == null) {
1924              segmentState_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance();
1925            } else {
1926              segmentStateBuilder_.clear();
1927            }
1928            bitField0_ = (bitField0_ & ~0x00000001);
1929            acceptedInEpoch_ = 0L;
1930            bitField0_ = (bitField0_ & ~0x00000002);
1931            return this;
1932          }
1933          
1934          public Builder clone() {
1935            return create().mergeFrom(buildPartial());
1936          }
1937          
1938          public com.google.protobuf.Descriptors.Descriptor
1939              getDescriptorForType() {
1940            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData.getDescriptor();
1941          }
1942          
1943          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData getDefaultInstanceForType() {
1944            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData.getDefaultInstance();
1945          }
1946          
1947          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData build() {
1948            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData result = buildPartial();
1949            if (!result.isInitialized()) {
1950              throw newUninitializedMessageException(result);
1951            }
1952            return result;
1953          }
1954          
1955          private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData buildParsed()
1956              throws com.google.protobuf.InvalidProtocolBufferException {
1957            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData result = buildPartial();
1958            if (!result.isInitialized()) {
1959              throw newUninitializedMessageException(
1960                result).asInvalidProtocolBufferException();
1961            }
1962            return result;
1963          }
1964          
1965          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData buildPartial() {
1966            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData(this);
1967            int from_bitField0_ = bitField0_;
1968            int to_bitField0_ = 0;
1969            if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
1970              to_bitField0_ |= 0x00000001;
1971            }
1972            if (segmentStateBuilder_ == null) {
1973              result.segmentState_ = segmentState_;
1974            } else {
1975              result.segmentState_ = segmentStateBuilder_.build();
1976            }
1977            if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
1978              to_bitField0_ |= 0x00000002;
1979            }
1980            result.acceptedInEpoch_ = acceptedInEpoch_;
1981            result.bitField0_ = to_bitField0_;
1982            onBuilt();
1983            return result;
1984          }
1985          
1986          public Builder mergeFrom(com.google.protobuf.Message other) {
1987            if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData) {
1988              return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData)other);
1989            } else {
1990              super.mergeFrom(other);
1991              return this;
1992            }
1993          }
1994          
1995          public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData other) {
1996            if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData.getDefaultInstance()) return this;
1997            if (other.hasSegmentState()) {
1998              mergeSegmentState(other.getSegmentState());
1999            }
2000            if (other.hasAcceptedInEpoch()) {
2001              setAcceptedInEpoch(other.getAcceptedInEpoch());
2002            }
2003            this.mergeUnknownFields(other.getUnknownFields());
2004            return this;
2005          }
2006          
2007          public final boolean isInitialized() {
2008            if (!hasSegmentState()) {
2009              
2010              return false;
2011            }
2012            if (!hasAcceptedInEpoch()) {
2013              
2014              return false;
2015            }
2016            if (!getSegmentState().isInitialized()) {
2017              
2018              return false;
2019            }
2020            return true;
2021          }
2022          
2023          public Builder mergeFrom(
2024              com.google.protobuf.CodedInputStream input,
2025              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
2026              throws java.io.IOException {
2027            com.google.protobuf.UnknownFieldSet.Builder unknownFields =
2028              com.google.protobuf.UnknownFieldSet.newBuilder(
2029                this.getUnknownFields());
2030            while (true) {
2031              int tag = input.readTag();
2032              switch (tag) {
2033                case 0:
2034                  this.setUnknownFields(unknownFields.build());
2035                  onChanged();
2036                  return this;
2037                default: {
2038                  if (!parseUnknownField(input, unknownFields,
2039                                         extensionRegistry, tag)) {
2040                    this.setUnknownFields(unknownFields.build());
2041                    onChanged();
2042                    return this;
2043                  }
2044                  break;
2045                }
2046                case 10: {
2047                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder subBuilder = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.newBuilder();
2048                  if (hasSegmentState()) {
2049                    subBuilder.mergeFrom(getSegmentState());
2050                  }
2051                  input.readMessage(subBuilder, extensionRegistry);
2052                  setSegmentState(subBuilder.buildPartial());
2053                  break;
2054                }
2055                case 16: {
2056                  bitField0_ |= 0x00000002;
2057                  acceptedInEpoch_ = input.readUInt64();
2058                  break;
2059                }
2060              }
2061            }
2062          }
2063          
2064          private int bitField0_;
2065          
2066          // required .hadoop.hdfs.SegmentStateProto segmentState = 1;
2067          private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto segmentState_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance();
2068          private com.google.protobuf.SingleFieldBuilder<
2069              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder> segmentStateBuilder_;
2070          public boolean hasSegmentState() {
2071            return ((bitField0_ & 0x00000001) == 0x00000001);
2072          }
2073          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto getSegmentState() {
2074            if (segmentStateBuilder_ == null) {
2075              return segmentState_;
2076            } else {
2077              return segmentStateBuilder_.getMessage();
2078            }
2079          }
2080          public Builder setSegmentState(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto value) {
2081            if (segmentStateBuilder_ == null) {
2082              if (value == null) {
2083                throw new NullPointerException();
2084              }
2085              segmentState_ = value;
2086              onChanged();
2087            } else {
2088              segmentStateBuilder_.setMessage(value);
2089            }
2090            bitField0_ |= 0x00000001;
2091            return this;
2092          }
2093          public Builder setSegmentState(
2094              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder builderForValue) {
2095            if (segmentStateBuilder_ == null) {
2096              segmentState_ = builderForValue.build();
2097              onChanged();
2098            } else {
2099              segmentStateBuilder_.setMessage(builderForValue.build());
2100            }
2101            bitField0_ |= 0x00000001;
2102            return this;
2103          }
2104          public Builder mergeSegmentState(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto value) {
2105            if (segmentStateBuilder_ == null) {
2106              if (((bitField0_ & 0x00000001) == 0x00000001) &&
2107                  segmentState_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance()) {
2108                segmentState_ =
2109                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.newBuilder(segmentState_).mergeFrom(value).buildPartial();
2110              } else {
2111                segmentState_ = value;
2112              }
2113              onChanged();
2114            } else {
2115              segmentStateBuilder_.mergeFrom(value);
2116            }
2117            bitField0_ |= 0x00000001;
2118            return this;
2119          }
2120          public Builder clearSegmentState() {
2121            if (segmentStateBuilder_ == null) {
2122              segmentState_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance();
2123              onChanged();
2124            } else {
2125              segmentStateBuilder_.clear();
2126            }
2127            bitField0_ = (bitField0_ & ~0x00000001);
2128            return this;
2129          }
2130          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder getSegmentStateBuilder() {
2131            bitField0_ |= 0x00000001;
2132            onChanged();
2133            return getSegmentStateFieldBuilder().getBuilder();
2134          }
2135          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder getSegmentStateOrBuilder() {
2136            if (segmentStateBuilder_ != null) {
2137              return segmentStateBuilder_.getMessageOrBuilder();
2138            } else {
2139              return segmentState_;
2140            }
2141          }
2142          private com.google.protobuf.SingleFieldBuilder<
2143              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder> 
2144              getSegmentStateFieldBuilder() {
2145            if (segmentStateBuilder_ == null) {
2146              segmentStateBuilder_ = new com.google.protobuf.SingleFieldBuilder<
2147                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder>(
2148                      segmentState_,
2149                      getParentForChildren(),
2150                      isClean());
2151              segmentState_ = null;
2152            }
2153            return segmentStateBuilder_;
2154          }
2155          
2156          // required uint64 acceptedInEpoch = 2;
2157          private long acceptedInEpoch_ ;
2158          public boolean hasAcceptedInEpoch() {
2159            return ((bitField0_ & 0x00000002) == 0x00000002);
2160          }
2161          public long getAcceptedInEpoch() {
2162            return acceptedInEpoch_;
2163          }
2164          public Builder setAcceptedInEpoch(long value) {
2165            bitField0_ |= 0x00000002;
2166            acceptedInEpoch_ = value;
2167            onChanged();
2168            return this;
2169          }
2170          public Builder clearAcceptedInEpoch() {
2171            bitField0_ = (bitField0_ & ~0x00000002);
2172            acceptedInEpoch_ = 0L;
2173            onChanged();
2174            return this;
2175          }
2176          
2177          // @@protoc_insertion_point(builder_scope:hadoop.hdfs.PersistedRecoveryPaxosData)
2178        }
2179        
2180        static {
2181          defaultInstance = new PersistedRecoveryPaxosData(true);
2182          defaultInstance.initFields();
2183        }
2184        
2185        // @@protoc_insertion_point(class_scope:hadoop.hdfs.PersistedRecoveryPaxosData)
2186      }
2187      
2188      public interface JournalRequestProtoOrBuilder
2189          extends com.google.protobuf.MessageOrBuilder {
2190        
2191        // required .hadoop.hdfs.RequestInfoProto reqInfo = 1;
2192        boolean hasReqInfo();
2193        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo();
2194        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder();
2195        
2196        // required uint64 firstTxnId = 2;
2197        boolean hasFirstTxnId();
2198        long getFirstTxnId();
2199        
2200        // required uint32 numTxns = 3;
2201        boolean hasNumTxns();
2202        int getNumTxns();
2203        
2204        // required bytes records = 4;
2205        boolean hasRecords();
2206        com.google.protobuf.ByteString getRecords();
2207        
2208        // required uint64 segmentTxnId = 5;
2209        boolean hasSegmentTxnId();
2210        long getSegmentTxnId();
2211      }
2212      public static final class JournalRequestProto extends
2213          com.google.protobuf.GeneratedMessage
2214          implements JournalRequestProtoOrBuilder {
2215        // Use JournalRequestProto.newBuilder() to construct.
2216        private JournalRequestProto(Builder builder) {
2217          super(builder);
2218        }
2219        private JournalRequestProto(boolean noInit) {}
2220        
2221        private static final JournalRequestProto defaultInstance;
2222        public static JournalRequestProto getDefaultInstance() {
2223          return defaultInstance;
2224        }
2225        
2226        public JournalRequestProto getDefaultInstanceForType() {
2227          return defaultInstance;
2228        }
2229        
2230        public static final com.google.protobuf.Descriptors.Descriptor
2231            getDescriptor() {
2232          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_JournalRequestProto_descriptor;
2233        }
2234        
2235        protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
2236            internalGetFieldAccessorTable() {
2237          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_JournalRequestProto_fieldAccessorTable;
2238        }
2239        
2240        private int bitField0_;
2241        // required .hadoop.hdfs.RequestInfoProto reqInfo = 1;
2242        public static final int REQINFO_FIELD_NUMBER = 1;
2243        private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto reqInfo_;
2244        public boolean hasReqInfo() {
2245          return ((bitField0_ & 0x00000001) == 0x00000001);
2246        }
2247        public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo() {
2248          return reqInfo_;
2249        }
2250        public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder() {
2251          return reqInfo_;
2252        }
2253        
2254        // required uint64 firstTxnId = 2;
2255        public static final int FIRSTTXNID_FIELD_NUMBER = 2;
2256        private long firstTxnId_;
2257        public boolean hasFirstTxnId() {
2258          return ((bitField0_ & 0x00000002) == 0x00000002);
2259        }
2260        public long getFirstTxnId() {
2261          return firstTxnId_;
2262        }
2263        
2264        // required uint32 numTxns = 3;
2265        public static final int NUMTXNS_FIELD_NUMBER = 3;
2266        private int numTxns_;
2267        public boolean hasNumTxns() {
2268          return ((bitField0_ & 0x00000004) == 0x00000004);
2269        }
2270        public int getNumTxns() {
2271          return numTxns_;
2272        }
2273        
2274        // required bytes records = 4;
2275        public static final int RECORDS_FIELD_NUMBER = 4;
2276        private com.google.protobuf.ByteString records_;
2277        public boolean hasRecords() {
2278          return ((bitField0_ & 0x00000008) == 0x00000008);
2279        }
2280        public com.google.protobuf.ByteString getRecords() {
2281          return records_;
2282        }
2283        
2284        // required uint64 segmentTxnId = 5;
2285        public static final int SEGMENTTXNID_FIELD_NUMBER = 5;
2286        private long segmentTxnId_;
2287        public boolean hasSegmentTxnId() {
2288          return ((bitField0_ & 0x00000010) == 0x00000010);
2289        }
2290        public long getSegmentTxnId() {
2291          return segmentTxnId_;
2292        }
2293        
2294        private void initFields() {
2295          reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
2296          firstTxnId_ = 0L;
2297          numTxns_ = 0;
2298          records_ = com.google.protobuf.ByteString.EMPTY;
2299          segmentTxnId_ = 0L;
2300        }
2301        private byte memoizedIsInitialized = -1;
2302        public final boolean isInitialized() {
2303          byte isInitialized = memoizedIsInitialized;
2304          if (isInitialized != -1) return isInitialized == 1;
2305          
2306          if (!hasReqInfo()) {
2307            memoizedIsInitialized = 0;
2308            return false;
2309          }
2310          if (!hasFirstTxnId()) {
2311            memoizedIsInitialized = 0;
2312            return false;
2313          }
2314          if (!hasNumTxns()) {
2315            memoizedIsInitialized = 0;
2316            return false;
2317          }
2318          if (!hasRecords()) {
2319            memoizedIsInitialized = 0;
2320            return false;
2321          }
2322          if (!hasSegmentTxnId()) {
2323            memoizedIsInitialized = 0;
2324            return false;
2325          }
2326          if (!getReqInfo().isInitialized()) {
2327            memoizedIsInitialized = 0;
2328            return false;
2329          }
2330          memoizedIsInitialized = 1;
2331          return true;
2332        }
2333        
2334        public void writeTo(com.google.protobuf.CodedOutputStream output)
2335                            throws java.io.IOException {
2336          getSerializedSize();
2337          if (((bitField0_ & 0x00000001) == 0x00000001)) {
2338            output.writeMessage(1, reqInfo_);
2339          }
2340          if (((bitField0_ & 0x00000002) == 0x00000002)) {
2341            output.writeUInt64(2, firstTxnId_);
2342          }
2343          if (((bitField0_ & 0x00000004) == 0x00000004)) {
2344            output.writeUInt32(3, numTxns_);
2345          }
2346          if (((bitField0_ & 0x00000008) == 0x00000008)) {
2347            output.writeBytes(4, records_);
2348          }
2349          if (((bitField0_ & 0x00000010) == 0x00000010)) {
2350            output.writeUInt64(5, segmentTxnId_);
2351          }
2352          getUnknownFields().writeTo(output);
2353        }
2354        
2355        private int memoizedSerializedSize = -1;
2356        public int getSerializedSize() {
2357          int size = memoizedSerializedSize;
2358          if (size != -1) return size;
2359        
2360          size = 0;
2361          if (((bitField0_ & 0x00000001) == 0x00000001)) {
2362            size += com.google.protobuf.CodedOutputStream
2363              .computeMessageSize(1, reqInfo_);
2364          }
2365          if (((bitField0_ & 0x00000002) == 0x00000002)) {
2366            size += com.google.protobuf.CodedOutputStream
2367              .computeUInt64Size(2, firstTxnId_);
2368          }
2369          if (((bitField0_ & 0x00000004) == 0x00000004)) {
2370            size += com.google.protobuf.CodedOutputStream
2371              .computeUInt32Size(3, numTxns_);
2372          }
2373          if (((bitField0_ & 0x00000008) == 0x00000008)) {
2374            size += com.google.protobuf.CodedOutputStream
2375              .computeBytesSize(4, records_);
2376          }
2377          if (((bitField0_ & 0x00000010) == 0x00000010)) {
2378            size += com.google.protobuf.CodedOutputStream
2379              .computeUInt64Size(5, segmentTxnId_);
2380          }
2381          size += getUnknownFields().getSerializedSize();
2382          memoizedSerializedSize = size;
2383          return size;
2384        }
2385        
2386        private static final long serialVersionUID = 0L;
2387        @java.lang.Override
2388        protected java.lang.Object writeReplace()
2389            throws java.io.ObjectStreamException {
2390          return super.writeReplace();
2391        }
2392        
2393        @java.lang.Override
2394        public boolean equals(final java.lang.Object obj) {
2395          if (obj == this) {
2396           return true;
2397          }
2398          if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto)) {
2399            return super.equals(obj);
2400          }
2401          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto) obj;
2402          
2403          boolean result = true;
2404          result = result && (hasReqInfo() == other.hasReqInfo());
2405          if (hasReqInfo()) {
2406            result = result && getReqInfo()
2407                .equals(other.getReqInfo());
2408          }
2409          result = result && (hasFirstTxnId() == other.hasFirstTxnId());
2410          if (hasFirstTxnId()) {
2411            result = result && (getFirstTxnId()
2412                == other.getFirstTxnId());
2413          }
2414          result = result && (hasNumTxns() == other.hasNumTxns());
2415          if (hasNumTxns()) {
2416            result = result && (getNumTxns()
2417                == other.getNumTxns());
2418          }
2419          result = result && (hasRecords() == other.hasRecords());
2420          if (hasRecords()) {
2421            result = result && getRecords()
2422                .equals(other.getRecords());
2423          }
2424          result = result && (hasSegmentTxnId() == other.hasSegmentTxnId());
2425          if (hasSegmentTxnId()) {
2426            result = result && (getSegmentTxnId()
2427                == other.getSegmentTxnId());
2428          }
2429          result = result &&
2430              getUnknownFields().equals(other.getUnknownFields());
2431          return result;
2432        }
2433        
2434        @java.lang.Override
2435        public int hashCode() {
2436          int hash = 41;
2437          hash = (19 * hash) + getDescriptorForType().hashCode();
2438          if (hasReqInfo()) {
2439            hash = (37 * hash) + REQINFO_FIELD_NUMBER;
2440            hash = (53 * hash) + getReqInfo().hashCode();
2441          }
2442          if (hasFirstTxnId()) {
2443            hash = (37 * hash) + FIRSTTXNID_FIELD_NUMBER;
2444            hash = (53 * hash) + hashLong(getFirstTxnId());
2445          }
2446          if (hasNumTxns()) {
2447            hash = (37 * hash) + NUMTXNS_FIELD_NUMBER;
2448            hash = (53 * hash) + getNumTxns();
2449          }
2450          if (hasRecords()) {
2451            hash = (37 * hash) + RECORDS_FIELD_NUMBER;
2452            hash = (53 * hash) + getRecords().hashCode();
2453          }
2454          if (hasSegmentTxnId()) {
2455            hash = (37 * hash) + SEGMENTTXNID_FIELD_NUMBER;
2456            hash = (53 * hash) + hashLong(getSegmentTxnId());
2457          }
2458          hash = (29 * hash) + getUnknownFields().hashCode();
2459          return hash;
2460        }
2461        
2462        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto parseFrom(
2463            com.google.protobuf.ByteString data)
2464            throws com.google.protobuf.InvalidProtocolBufferException {
2465          return newBuilder().mergeFrom(data).buildParsed();
2466        }
2467        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto parseFrom(
2468            com.google.protobuf.ByteString data,
2469            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
2470            throws com.google.protobuf.InvalidProtocolBufferException {
2471          return newBuilder().mergeFrom(data, extensionRegistry)
2472                   .buildParsed();
2473        }
2474        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto parseFrom(byte[] data)
2475            throws com.google.protobuf.InvalidProtocolBufferException {
2476          return newBuilder().mergeFrom(data).buildParsed();
2477        }
2478        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto parseFrom(
2479            byte[] data,
2480            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
2481            throws com.google.protobuf.InvalidProtocolBufferException {
2482          return newBuilder().mergeFrom(data, extensionRegistry)
2483                   .buildParsed();
2484        }
2485        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto parseFrom(java.io.InputStream input)
2486            throws java.io.IOException {
2487          return newBuilder().mergeFrom(input).buildParsed();
2488        }
2489        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto parseFrom(
2490            java.io.InputStream input,
2491            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
2492            throws java.io.IOException {
2493          return newBuilder().mergeFrom(input, extensionRegistry)
2494                   .buildParsed();
2495        }
2496        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto parseDelimitedFrom(java.io.InputStream input)
2497            throws java.io.IOException {
2498          Builder builder = newBuilder();
2499          if (builder.mergeDelimitedFrom(input)) {
2500            return builder.buildParsed();
2501          } else {
2502            return null;
2503          }
2504        }
2505        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto parseDelimitedFrom(
2506            java.io.InputStream input,
2507            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
2508            throws java.io.IOException {
2509          Builder builder = newBuilder();
2510          if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
2511            return builder.buildParsed();
2512          } else {
2513            return null;
2514          }
2515        }
2516        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto parseFrom(
2517            com.google.protobuf.CodedInputStream input)
2518            throws java.io.IOException {
2519          return newBuilder().mergeFrom(input).buildParsed();
2520        }
2521        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto parseFrom(
2522            com.google.protobuf.CodedInputStream input,
2523            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
2524            throws java.io.IOException {
2525          return newBuilder().mergeFrom(input, extensionRegistry)
2526                   .buildParsed();
2527        }
2528        
2529        public static Builder newBuilder() { return Builder.create(); }
2530        public Builder newBuilderForType() { return newBuilder(); }
2531        public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto prototype) {
2532          return newBuilder().mergeFrom(prototype);
2533        }
2534        public Builder toBuilder() { return newBuilder(this); }
2535        
2536        @java.lang.Override
2537        protected Builder newBuilderForType(
2538            com.google.protobuf.GeneratedMessage.BuilderParent parent) {
2539          Builder builder = new Builder(parent);
2540          return builder;
2541        }
2542        public static final class Builder extends
2543            com.google.protobuf.GeneratedMessage.Builder<Builder>
2544           implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProtoOrBuilder {
2545          public static final com.google.protobuf.Descriptors.Descriptor
2546              getDescriptor() {
2547            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_JournalRequestProto_descriptor;
2548          }
2549          
2550          protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
2551              internalGetFieldAccessorTable() {
2552            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_JournalRequestProto_fieldAccessorTable;
2553          }
2554          
2555          // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto.newBuilder()
2556          private Builder() {
2557            maybeForceBuilderInitialization();
2558          }
2559          
2560          private Builder(BuilderParent parent) {
2561            super(parent);
2562            maybeForceBuilderInitialization();
2563          }
2564          private void maybeForceBuilderInitialization() {
2565            if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
2566              getReqInfoFieldBuilder();
2567            }
2568          }
2569          private static Builder create() {
2570            return new Builder();
2571          }
2572          
2573          public Builder clear() {
2574            super.clear();
2575            if (reqInfoBuilder_ == null) {
2576              reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
2577            } else {
2578              reqInfoBuilder_.clear();
2579            }
2580            bitField0_ = (bitField0_ & ~0x00000001);
2581            firstTxnId_ = 0L;
2582            bitField0_ = (bitField0_ & ~0x00000002);
2583            numTxns_ = 0;
2584            bitField0_ = (bitField0_ & ~0x00000004);
2585            records_ = com.google.protobuf.ByteString.EMPTY;
2586            bitField0_ = (bitField0_ & ~0x00000008);
2587            segmentTxnId_ = 0L;
2588            bitField0_ = (bitField0_ & ~0x00000010);
2589            return this;
2590          }
2591          
2592          public Builder clone() {
2593            return create().mergeFrom(buildPartial());
2594          }
2595          
2596          public com.google.protobuf.Descriptors.Descriptor
2597              getDescriptorForType() {
2598            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto.getDescriptor();
2599          }
2600          
2601          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto getDefaultInstanceForType() {
2602            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto.getDefaultInstance();
2603          }
2604          
2605          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto build() {
2606            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto result = buildPartial();
2607            if (!result.isInitialized()) {
2608              throw newUninitializedMessageException(result);
2609            }
2610            return result;
2611          }
2612          
2613          private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto buildParsed()
2614              throws com.google.protobuf.InvalidProtocolBufferException {
2615            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto result = buildPartial();
2616            if (!result.isInitialized()) {
2617              throw newUninitializedMessageException(
2618                result).asInvalidProtocolBufferException();
2619            }
2620            return result;
2621          }
2622          
2623          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto buildPartial() {
2624            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto(this);
2625            int from_bitField0_ = bitField0_;
2626            int to_bitField0_ = 0;
2627            if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
2628              to_bitField0_ |= 0x00000001;
2629            }
2630            if (reqInfoBuilder_ == null) {
2631              result.reqInfo_ = reqInfo_;
2632            } else {
2633              result.reqInfo_ = reqInfoBuilder_.build();
2634            }
2635            if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
2636              to_bitField0_ |= 0x00000002;
2637            }
2638            result.firstTxnId_ = firstTxnId_;
2639            if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
2640              to_bitField0_ |= 0x00000004;
2641            }
2642            result.numTxns_ = numTxns_;
2643            if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
2644              to_bitField0_ |= 0x00000008;
2645            }
2646            result.records_ = records_;
2647            if (((from_bitField0_ & 0x00000010) == 0x00000010)) {
2648              to_bitField0_ |= 0x00000010;
2649            }
2650            result.segmentTxnId_ = segmentTxnId_;
2651            result.bitField0_ = to_bitField0_;
2652            onBuilt();
2653            return result;
2654          }
2655          
2656          public Builder mergeFrom(com.google.protobuf.Message other) {
2657            if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto) {
2658              return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto)other);
2659            } else {
2660              super.mergeFrom(other);
2661              return this;
2662            }
2663          }
2664          
2665          public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto other) {
2666            if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto.getDefaultInstance()) return this;
2667            if (other.hasReqInfo()) {
2668              mergeReqInfo(other.getReqInfo());
2669            }
2670            if (other.hasFirstTxnId()) {
2671              setFirstTxnId(other.getFirstTxnId());
2672            }
2673            if (other.hasNumTxns()) {
2674              setNumTxns(other.getNumTxns());
2675            }
2676            if (other.hasRecords()) {
2677              setRecords(other.getRecords());
2678            }
2679            if (other.hasSegmentTxnId()) {
2680              setSegmentTxnId(other.getSegmentTxnId());
2681            }
2682            this.mergeUnknownFields(other.getUnknownFields());
2683            return this;
2684          }
2685          
2686          public final boolean isInitialized() {
2687            if (!hasReqInfo()) {
2688              
2689              return false;
2690            }
2691            if (!hasFirstTxnId()) {
2692              
2693              return false;
2694            }
2695            if (!hasNumTxns()) {
2696              
2697              return false;
2698            }
2699            if (!hasRecords()) {
2700              
2701              return false;
2702            }
2703            if (!hasSegmentTxnId()) {
2704              
2705              return false;
2706            }
2707            if (!getReqInfo().isInitialized()) {
2708              
2709              return false;
2710            }
2711            return true;
2712          }
2713          
2714          public Builder mergeFrom(
2715              com.google.protobuf.CodedInputStream input,
2716              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
2717              throws java.io.IOException {
2718            com.google.protobuf.UnknownFieldSet.Builder unknownFields =
2719              com.google.protobuf.UnknownFieldSet.newBuilder(
2720                this.getUnknownFields());
2721            while (true) {
2722              int tag = input.readTag();
2723              switch (tag) {
2724                case 0:
2725                  this.setUnknownFields(unknownFields.build());
2726                  onChanged();
2727                  return this;
2728                default: {
2729                  if (!parseUnknownField(input, unknownFields,
2730                                         extensionRegistry, tag)) {
2731                    this.setUnknownFields(unknownFields.build());
2732                    onChanged();
2733                    return this;
2734                  }
2735                  break;
2736                }
2737                case 10: {
2738                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder subBuilder = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.newBuilder();
2739                  if (hasReqInfo()) {
2740                    subBuilder.mergeFrom(getReqInfo());
2741                  }
2742                  input.readMessage(subBuilder, extensionRegistry);
2743                  setReqInfo(subBuilder.buildPartial());
2744                  break;
2745                }
2746                case 16: {
2747                  bitField0_ |= 0x00000002;
2748                  firstTxnId_ = input.readUInt64();
2749                  break;
2750                }
2751                case 24: {
2752                  bitField0_ |= 0x00000004;
2753                  numTxns_ = input.readUInt32();
2754                  break;
2755                }
2756                case 34: {
2757                  bitField0_ |= 0x00000008;
2758                  records_ = input.readBytes();
2759                  break;
2760                }
2761                case 40: {
2762                  bitField0_ |= 0x00000010;
2763                  segmentTxnId_ = input.readUInt64();
2764                  break;
2765                }
2766              }
2767            }
2768          }
2769          
2770          private int bitField0_;
2771          
2772          // required .hadoop.hdfs.RequestInfoProto reqInfo = 1;
2773          private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
2774          private com.google.protobuf.SingleFieldBuilder<
2775              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder> reqInfoBuilder_;
2776          public boolean hasReqInfo() {
2777            return ((bitField0_ & 0x00000001) == 0x00000001);
2778          }
2779          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo() {
2780            if (reqInfoBuilder_ == null) {
2781              return reqInfo_;
2782            } else {
2783              return reqInfoBuilder_.getMessage();
2784            }
2785          }
2786          public Builder setReqInfo(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto value) {
2787            if (reqInfoBuilder_ == null) {
2788              if (value == null) {
2789                throw new NullPointerException();
2790              }
2791              reqInfo_ = value;
2792              onChanged();
2793            } else {
2794              reqInfoBuilder_.setMessage(value);
2795            }
2796            bitField0_ |= 0x00000001;
2797            return this;
2798          }
2799          public Builder setReqInfo(
2800              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder builderForValue) {
2801            if (reqInfoBuilder_ == null) {
2802              reqInfo_ = builderForValue.build();
2803              onChanged();
2804            } else {
2805              reqInfoBuilder_.setMessage(builderForValue.build());
2806            }
2807            bitField0_ |= 0x00000001;
2808            return this;
2809          }
2810          public Builder mergeReqInfo(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto value) {
2811            if (reqInfoBuilder_ == null) {
2812              if (((bitField0_ & 0x00000001) == 0x00000001) &&
2813                  reqInfo_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance()) {
2814                reqInfo_ =
2815                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.newBuilder(reqInfo_).mergeFrom(value).buildPartial();
2816              } else {
2817                reqInfo_ = value;
2818              }
2819              onChanged();
2820            } else {
2821              reqInfoBuilder_.mergeFrom(value);
2822            }
2823            bitField0_ |= 0x00000001;
2824            return this;
2825          }
2826          public Builder clearReqInfo() {
2827            if (reqInfoBuilder_ == null) {
2828              reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
2829              onChanged();
2830            } else {
2831              reqInfoBuilder_.clear();
2832            }
2833            bitField0_ = (bitField0_ & ~0x00000001);
2834            return this;
2835          }
2836          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder getReqInfoBuilder() {
2837            bitField0_ |= 0x00000001;
2838            onChanged();
2839            return getReqInfoFieldBuilder().getBuilder();
2840          }
2841          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder() {
2842            if (reqInfoBuilder_ != null) {
2843              return reqInfoBuilder_.getMessageOrBuilder();
2844            } else {
2845              return reqInfo_;
2846            }
2847          }
2848          private com.google.protobuf.SingleFieldBuilder<
2849              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder> 
2850              getReqInfoFieldBuilder() {
2851            if (reqInfoBuilder_ == null) {
2852              reqInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder<
2853                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder>(
2854                      reqInfo_,
2855                      getParentForChildren(),
2856                      isClean());
2857              reqInfo_ = null;
2858            }
2859            return reqInfoBuilder_;
2860          }
2861          
2862          // required uint64 firstTxnId = 2;
2863          private long firstTxnId_ ;
2864          public boolean hasFirstTxnId() {
2865            return ((bitField0_ & 0x00000002) == 0x00000002);
2866          }
2867          public long getFirstTxnId() {
2868            return firstTxnId_;
2869          }
2870          public Builder setFirstTxnId(long value) {
2871            bitField0_ |= 0x00000002;
2872            firstTxnId_ = value;
2873            onChanged();
2874            return this;
2875          }
2876          public Builder clearFirstTxnId() {
2877            bitField0_ = (bitField0_ & ~0x00000002);
2878            firstTxnId_ = 0L;
2879            onChanged();
2880            return this;
2881          }
2882          
2883          // required uint32 numTxns = 3;
2884          private int numTxns_ ;
2885          public boolean hasNumTxns() {
2886            return ((bitField0_ & 0x00000004) == 0x00000004);
2887          }
2888          public int getNumTxns() {
2889            return numTxns_;
2890          }
2891          public Builder setNumTxns(int value) {
2892            bitField0_ |= 0x00000004;
2893            numTxns_ = value;
2894            onChanged();
2895            return this;
2896          }
2897          public Builder clearNumTxns() {
2898            bitField0_ = (bitField0_ & ~0x00000004);
2899            numTxns_ = 0;
2900            onChanged();
2901            return this;
2902          }
2903          
2904          // required bytes records = 4;
2905          private com.google.protobuf.ByteString records_ = com.google.protobuf.ByteString.EMPTY;
2906          public boolean hasRecords() {
2907            return ((bitField0_ & 0x00000008) == 0x00000008);
2908          }
2909          public com.google.protobuf.ByteString getRecords() {
2910            return records_;
2911          }
2912          public Builder setRecords(com.google.protobuf.ByteString value) {
2913            if (value == null) {
2914        throw new NullPointerException();
2915      }
2916      bitField0_ |= 0x00000008;
2917            records_ = value;
2918            onChanged();
2919            return this;
2920          }
2921          public Builder clearRecords() {
2922            bitField0_ = (bitField0_ & ~0x00000008);
2923            records_ = getDefaultInstance().getRecords();
2924            onChanged();
2925            return this;
2926          }
2927          
2928          // required uint64 segmentTxnId = 5;
2929          private long segmentTxnId_ ;
2930          public boolean hasSegmentTxnId() {
2931            return ((bitField0_ & 0x00000010) == 0x00000010);
2932          }
2933          public long getSegmentTxnId() {
2934            return segmentTxnId_;
2935          }
2936          public Builder setSegmentTxnId(long value) {
2937            bitField0_ |= 0x00000010;
2938            segmentTxnId_ = value;
2939            onChanged();
2940            return this;
2941          }
2942          public Builder clearSegmentTxnId() {
2943            bitField0_ = (bitField0_ & ~0x00000010);
2944            segmentTxnId_ = 0L;
2945            onChanged();
2946            return this;
2947          }
2948          
2949          // @@protoc_insertion_point(builder_scope:hadoop.hdfs.JournalRequestProto)
2950        }
2951        
2952        static {
2953          defaultInstance = new JournalRequestProto(true);
2954          defaultInstance.initFields();
2955        }
2956        
2957        // @@protoc_insertion_point(class_scope:hadoop.hdfs.JournalRequestProto)
2958      }
2959      
2960      public interface JournalResponseProtoOrBuilder
2961          extends com.google.protobuf.MessageOrBuilder {
2962      }
2963      public static final class JournalResponseProto extends
2964          com.google.protobuf.GeneratedMessage
2965          implements JournalResponseProtoOrBuilder {
2966        // Use JournalResponseProto.newBuilder() to construct.
2967        private JournalResponseProto(Builder builder) {
2968          super(builder);
2969        }
2970        private JournalResponseProto(boolean noInit) {}
2971        
2972        private static final JournalResponseProto defaultInstance;
2973        public static JournalResponseProto getDefaultInstance() {
2974          return defaultInstance;
2975        }
2976        
2977        public JournalResponseProto getDefaultInstanceForType() {
2978          return defaultInstance;
2979        }
2980        
2981        public static final com.google.protobuf.Descriptors.Descriptor
2982            getDescriptor() {
2983          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_JournalResponseProto_descriptor;
2984        }
2985        
2986        protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
2987            internalGetFieldAccessorTable() {
2988          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_JournalResponseProto_fieldAccessorTable;
2989        }
2990        
2991        private void initFields() {
2992        }
2993        private byte memoizedIsInitialized = -1;
2994        public final boolean isInitialized() {
2995          byte isInitialized = memoizedIsInitialized;
2996          if (isInitialized != -1) return isInitialized == 1;
2997          
2998          memoizedIsInitialized = 1;
2999          return true;
3000        }
3001        
3002        public void writeTo(com.google.protobuf.CodedOutputStream output)
3003                            throws java.io.IOException {
3004          getSerializedSize();
3005          getUnknownFields().writeTo(output);
3006        }
3007        
3008        private int memoizedSerializedSize = -1;
3009        public int getSerializedSize() {
3010          int size = memoizedSerializedSize;
3011          if (size != -1) return size;
3012        
3013          size = 0;
3014          size += getUnknownFields().getSerializedSize();
3015          memoizedSerializedSize = size;
3016          return size;
3017        }
3018        
3019        private static final long serialVersionUID = 0L;
3020        @java.lang.Override
3021        protected java.lang.Object writeReplace()
3022            throws java.io.ObjectStreamException {
3023          return super.writeReplace();
3024        }
3025        
3026        @java.lang.Override
3027        public boolean equals(final java.lang.Object obj) {
3028          if (obj == this) {
3029           return true;
3030          }
3031          if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto)) {
3032            return super.equals(obj);
3033          }
3034          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto) obj;
3035          
3036          boolean result = true;
3037          result = result &&
3038              getUnknownFields().equals(other.getUnknownFields());
3039          return result;
3040        }
3041        
3042        @java.lang.Override
3043        public int hashCode() {
3044          int hash = 41;
3045          hash = (19 * hash) + getDescriptorForType().hashCode();
3046          hash = (29 * hash) + getUnknownFields().hashCode();
3047          return hash;
3048        }
3049        
3050        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto parseFrom(
3051            com.google.protobuf.ByteString data)
3052            throws com.google.protobuf.InvalidProtocolBufferException {
3053          return newBuilder().mergeFrom(data).buildParsed();
3054        }
3055        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto parseFrom(
3056            com.google.protobuf.ByteString data,
3057            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
3058            throws com.google.protobuf.InvalidProtocolBufferException {
3059          return newBuilder().mergeFrom(data, extensionRegistry)
3060                   .buildParsed();
3061        }
3062        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto parseFrom(byte[] data)
3063            throws com.google.protobuf.InvalidProtocolBufferException {
3064          return newBuilder().mergeFrom(data).buildParsed();
3065        }
3066        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto parseFrom(
3067            byte[] data,
3068            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
3069            throws com.google.protobuf.InvalidProtocolBufferException {
3070          return newBuilder().mergeFrom(data, extensionRegistry)
3071                   .buildParsed();
3072        }
3073        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto parseFrom(java.io.InputStream input)
3074            throws java.io.IOException {
3075          return newBuilder().mergeFrom(input).buildParsed();
3076        }
3077        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto parseFrom(
3078            java.io.InputStream input,
3079            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
3080            throws java.io.IOException {
3081          return newBuilder().mergeFrom(input, extensionRegistry)
3082                   .buildParsed();
3083        }
3084        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto parseDelimitedFrom(java.io.InputStream input)
3085            throws java.io.IOException {
3086          Builder builder = newBuilder();
3087          if (builder.mergeDelimitedFrom(input)) {
3088            return builder.buildParsed();
3089          } else {
3090            return null;
3091          }
3092        }
3093        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto parseDelimitedFrom(
3094            java.io.InputStream input,
3095            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
3096            throws java.io.IOException {
3097          Builder builder = newBuilder();
3098          if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
3099            return builder.buildParsed();
3100          } else {
3101            return null;
3102          }
3103        }
3104        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto parseFrom(
3105            com.google.protobuf.CodedInputStream input)
3106            throws java.io.IOException {
3107          return newBuilder().mergeFrom(input).buildParsed();
3108        }
3109        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto parseFrom(
3110            com.google.protobuf.CodedInputStream input,
3111            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
3112            throws java.io.IOException {
3113          return newBuilder().mergeFrom(input, extensionRegistry)
3114                   .buildParsed();
3115        }
3116        
3117        public static Builder newBuilder() { return Builder.create(); }
3118        public Builder newBuilderForType() { return newBuilder(); }
3119        public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto prototype) {
3120          return newBuilder().mergeFrom(prototype);
3121        }
3122        public Builder toBuilder() { return newBuilder(this); }
3123        
3124        @java.lang.Override
3125        protected Builder newBuilderForType(
3126            com.google.protobuf.GeneratedMessage.BuilderParent parent) {
3127          Builder builder = new Builder(parent);
3128          return builder;
3129        }
3130        public static final class Builder extends
3131            com.google.protobuf.GeneratedMessage.Builder<Builder>
3132           implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProtoOrBuilder {
3133          public static final com.google.protobuf.Descriptors.Descriptor
3134              getDescriptor() {
3135            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_JournalResponseProto_descriptor;
3136          }
3137          
3138          protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
3139              internalGetFieldAccessorTable() {
3140            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_JournalResponseProto_fieldAccessorTable;
3141          }
3142          
3143          // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto.newBuilder()
3144          private Builder() {
3145            maybeForceBuilderInitialization();
3146          }
3147          
3148          private Builder(BuilderParent parent) {
3149            super(parent);
3150            maybeForceBuilderInitialization();
3151          }
3152          private void maybeForceBuilderInitialization() {
3153            if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
3154            }
3155          }
3156          private static Builder create() {
3157            return new Builder();
3158          }
3159          
3160          public Builder clear() {
3161            super.clear();
3162            return this;
3163          }
3164          
3165          public Builder clone() {
3166            return create().mergeFrom(buildPartial());
3167          }
3168          
3169          public com.google.protobuf.Descriptors.Descriptor
3170              getDescriptorForType() {
3171            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto.getDescriptor();
3172          }
3173          
3174          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto getDefaultInstanceForType() {
3175            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto.getDefaultInstance();
3176          }
3177          
3178          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto build() {
3179            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto result = buildPartial();
3180            if (!result.isInitialized()) {
3181              throw newUninitializedMessageException(result);
3182            }
3183            return result;
3184          }
3185          
3186          private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto buildParsed()
3187              throws com.google.protobuf.InvalidProtocolBufferException {
3188            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto result = buildPartial();
3189            if (!result.isInitialized()) {
3190              throw newUninitializedMessageException(
3191                result).asInvalidProtocolBufferException();
3192            }
3193            return result;
3194          }
3195          
3196          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto buildPartial() {
3197            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto(this);
3198            onBuilt();
3199            return result;
3200          }
3201          
3202          public Builder mergeFrom(com.google.protobuf.Message other) {
3203            if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto) {
3204              return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto)other);
3205            } else {
3206              super.mergeFrom(other);
3207              return this;
3208            }
3209          }
3210          
3211          public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto other) {
3212            if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto.getDefaultInstance()) return this;
3213            this.mergeUnknownFields(other.getUnknownFields());
3214            return this;
3215          }
3216          
3217          public final boolean isInitialized() {
3218            return true;
3219          }
3220          
3221          public Builder mergeFrom(
3222              com.google.protobuf.CodedInputStream input,
3223              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
3224              throws java.io.IOException {
3225            com.google.protobuf.UnknownFieldSet.Builder unknownFields =
3226              com.google.protobuf.UnknownFieldSet.newBuilder(
3227                this.getUnknownFields());
3228            while (true) {
3229              int tag = input.readTag();
3230              switch (tag) {
3231                case 0:
3232                  this.setUnknownFields(unknownFields.build());
3233                  onChanged();
3234                  return this;
3235                default: {
3236                  if (!parseUnknownField(input, unknownFields,
3237                                         extensionRegistry, tag)) {
3238                    this.setUnknownFields(unknownFields.build());
3239                    onChanged();
3240                    return this;
3241                  }
3242                  break;
3243                }
3244              }
3245            }
3246          }
3247          
3248          
3249          // @@protoc_insertion_point(builder_scope:hadoop.hdfs.JournalResponseProto)
3250        }
3251        
3252        static {
3253          defaultInstance = new JournalResponseProto(true);
3254          defaultInstance.initFields();
3255        }
3256        
3257        // @@protoc_insertion_point(class_scope:hadoop.hdfs.JournalResponseProto)
3258      }
3259      
3260      public interface HeartbeatRequestProtoOrBuilder
3261          extends com.google.protobuf.MessageOrBuilder {
3262        
3263        // required .hadoop.hdfs.RequestInfoProto reqInfo = 1;
3264        boolean hasReqInfo();
3265        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo();
3266        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder();
3267      }
3268      public static final class HeartbeatRequestProto extends
3269          com.google.protobuf.GeneratedMessage
3270          implements HeartbeatRequestProtoOrBuilder {
3271        // Use HeartbeatRequestProto.newBuilder() to construct.
3272        private HeartbeatRequestProto(Builder builder) {
3273          super(builder);
3274        }
3275        private HeartbeatRequestProto(boolean noInit) {}
3276        
3277        private static final HeartbeatRequestProto defaultInstance;
3278        public static HeartbeatRequestProto getDefaultInstance() {
3279          return defaultInstance;
3280        }
3281        
3282        public HeartbeatRequestProto getDefaultInstanceForType() {
3283          return defaultInstance;
3284        }
3285        
3286        public static final com.google.protobuf.Descriptors.Descriptor
3287            getDescriptor() {
3288          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_HeartbeatRequestProto_descriptor;
3289        }
3290        
3291        protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
3292            internalGetFieldAccessorTable() {
3293          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_HeartbeatRequestProto_fieldAccessorTable;
3294        }
3295        
3296        private int bitField0_;
3297        // required .hadoop.hdfs.RequestInfoProto reqInfo = 1;
3298        public static final int REQINFO_FIELD_NUMBER = 1;
3299        private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto reqInfo_;
3300        public boolean hasReqInfo() {
3301          return ((bitField0_ & 0x00000001) == 0x00000001);
3302        }
3303        public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo() {
3304          return reqInfo_;
3305        }
3306        public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder() {
3307          return reqInfo_;
3308        }
3309        
3310        private void initFields() {
3311          reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
3312        }
3313        private byte memoizedIsInitialized = -1;
3314        public final boolean isInitialized() {
3315          byte isInitialized = memoizedIsInitialized;
3316          if (isInitialized != -1) return isInitialized == 1;
3317          
3318          if (!hasReqInfo()) {
3319            memoizedIsInitialized = 0;
3320            return false;
3321          }
3322          if (!getReqInfo().isInitialized()) {
3323            memoizedIsInitialized = 0;
3324            return false;
3325          }
3326          memoizedIsInitialized = 1;
3327          return true;
3328        }
3329        
3330        public void writeTo(com.google.protobuf.CodedOutputStream output)
3331                            throws java.io.IOException {
3332          getSerializedSize();
3333          if (((bitField0_ & 0x00000001) == 0x00000001)) {
3334            output.writeMessage(1, reqInfo_);
3335          }
3336          getUnknownFields().writeTo(output);
3337        }
3338        
3339        private int memoizedSerializedSize = -1;
3340        public int getSerializedSize() {
3341          int size = memoizedSerializedSize;
3342          if (size != -1) return size;
3343        
3344          size = 0;
3345          if (((bitField0_ & 0x00000001) == 0x00000001)) {
3346            size += com.google.protobuf.CodedOutputStream
3347              .computeMessageSize(1, reqInfo_);
3348          }
3349          size += getUnknownFields().getSerializedSize();
3350          memoizedSerializedSize = size;
3351          return size;
3352        }
3353        
3354        private static final long serialVersionUID = 0L;
3355        @java.lang.Override
3356        protected java.lang.Object writeReplace()
3357            throws java.io.ObjectStreamException {
3358          return super.writeReplace();
3359        }
3360        
3361        @java.lang.Override
3362        public boolean equals(final java.lang.Object obj) {
3363          if (obj == this) {
3364           return true;
3365          }
3366          if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto)) {
3367            return super.equals(obj);
3368          }
3369          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto) obj;
3370          
3371          boolean result = true;
3372          result = result && (hasReqInfo() == other.hasReqInfo());
3373          if (hasReqInfo()) {
3374            result = result && getReqInfo()
3375                .equals(other.getReqInfo());
3376          }
3377          result = result &&
3378              getUnknownFields().equals(other.getUnknownFields());
3379          return result;
3380        }
3381        
3382        @java.lang.Override
3383        public int hashCode() {
3384          int hash = 41;
3385          hash = (19 * hash) + getDescriptorForType().hashCode();
3386          if (hasReqInfo()) {
3387            hash = (37 * hash) + REQINFO_FIELD_NUMBER;
3388            hash = (53 * hash) + getReqInfo().hashCode();
3389          }
3390          hash = (29 * hash) + getUnknownFields().hashCode();
3391          return hash;
3392        }
3393        
3394        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto parseFrom(
3395            com.google.protobuf.ByteString data)
3396            throws com.google.protobuf.InvalidProtocolBufferException {
3397          return newBuilder().mergeFrom(data).buildParsed();
3398        }
3399        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto parseFrom(
3400            com.google.protobuf.ByteString data,
3401            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
3402            throws com.google.protobuf.InvalidProtocolBufferException {
3403          return newBuilder().mergeFrom(data, extensionRegistry)
3404                   .buildParsed();
3405        }
3406        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto parseFrom(byte[] data)
3407            throws com.google.protobuf.InvalidProtocolBufferException {
3408          return newBuilder().mergeFrom(data).buildParsed();
3409        }
3410        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto parseFrom(
3411            byte[] data,
3412            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
3413            throws com.google.protobuf.InvalidProtocolBufferException {
3414          return newBuilder().mergeFrom(data, extensionRegistry)
3415                   .buildParsed();
3416        }
3417        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto parseFrom(java.io.InputStream input)
3418            throws java.io.IOException {
3419          return newBuilder().mergeFrom(input).buildParsed();
3420        }
3421        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto parseFrom(
3422            java.io.InputStream input,
3423            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
3424            throws java.io.IOException {
3425          return newBuilder().mergeFrom(input, extensionRegistry)
3426                   .buildParsed();
3427        }
3428        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto parseDelimitedFrom(java.io.InputStream input)
3429            throws java.io.IOException {
3430          Builder builder = newBuilder();
3431          if (builder.mergeDelimitedFrom(input)) {
3432            return builder.buildParsed();
3433          } else {
3434            return null;
3435          }
3436        }
3437        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto parseDelimitedFrom(
3438            java.io.InputStream input,
3439            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
3440            throws java.io.IOException {
3441          Builder builder = newBuilder();
3442          if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
3443            return builder.buildParsed();
3444          } else {
3445            return null;
3446          }
3447        }
3448        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto parseFrom(
3449            com.google.protobuf.CodedInputStream input)
3450            throws java.io.IOException {
3451          return newBuilder().mergeFrom(input).buildParsed();
3452        }
3453        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto parseFrom(
3454            com.google.protobuf.CodedInputStream input,
3455            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
3456            throws java.io.IOException {
3457          return newBuilder().mergeFrom(input, extensionRegistry)
3458                   .buildParsed();
3459        }
3460        
3461        public static Builder newBuilder() { return Builder.create(); }
3462        public Builder newBuilderForType() { return newBuilder(); }
3463        public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto prototype) {
3464          return newBuilder().mergeFrom(prototype);
3465        }
3466        public Builder toBuilder() { return newBuilder(this); }
3467        
3468        @java.lang.Override
3469        protected Builder newBuilderForType(
3470            com.google.protobuf.GeneratedMessage.BuilderParent parent) {
3471          Builder builder = new Builder(parent);
3472          return builder;
3473        }
3474        public static final class Builder extends
3475            com.google.protobuf.GeneratedMessage.Builder<Builder>
3476           implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProtoOrBuilder {
3477          public static final com.google.protobuf.Descriptors.Descriptor
3478              getDescriptor() {
3479            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_HeartbeatRequestProto_descriptor;
3480          }
3481          
3482          protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
3483              internalGetFieldAccessorTable() {
3484            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_HeartbeatRequestProto_fieldAccessorTable;
3485          }
3486          
3487          // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto.newBuilder()
3488          private Builder() {
3489            maybeForceBuilderInitialization();
3490          }
3491          
3492          private Builder(BuilderParent parent) {
3493            super(parent);
3494            maybeForceBuilderInitialization();
3495          }
3496          private void maybeForceBuilderInitialization() {
3497            if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
3498              getReqInfoFieldBuilder();
3499            }
3500          }
3501          private static Builder create() {
3502            return new Builder();
3503          }
3504          
3505          public Builder clear() {
3506            super.clear();
3507            if (reqInfoBuilder_ == null) {
3508              reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
3509            } else {
3510              reqInfoBuilder_.clear();
3511            }
3512            bitField0_ = (bitField0_ & ~0x00000001);
3513            return this;
3514          }
3515          
3516          public Builder clone() {
3517            return create().mergeFrom(buildPartial());
3518          }
3519          
3520          public com.google.protobuf.Descriptors.Descriptor
3521              getDescriptorForType() {
3522            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto.getDescriptor();
3523          }
3524          
3525          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto getDefaultInstanceForType() {
3526            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto.getDefaultInstance();
3527          }
3528          
3529          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto build() {
3530            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto result = buildPartial();
3531            if (!result.isInitialized()) {
3532              throw newUninitializedMessageException(result);
3533            }
3534            return result;
3535          }
3536          
3537          private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto buildParsed()
3538              throws com.google.protobuf.InvalidProtocolBufferException {
3539            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto result = buildPartial();
3540            if (!result.isInitialized()) {
3541              throw newUninitializedMessageException(
3542                result).asInvalidProtocolBufferException();
3543            }
3544            return result;
3545          }
3546          
3547          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto buildPartial() {
3548            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto(this);
3549            int from_bitField0_ = bitField0_;
3550            int to_bitField0_ = 0;
3551            if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
3552              to_bitField0_ |= 0x00000001;
3553            }
3554            if (reqInfoBuilder_ == null) {
3555              result.reqInfo_ = reqInfo_;
3556            } else {
3557              result.reqInfo_ = reqInfoBuilder_.build();
3558            }
3559            result.bitField0_ = to_bitField0_;
3560            onBuilt();
3561            return result;
3562          }
3563          
3564          public Builder mergeFrom(com.google.protobuf.Message other) {
3565            if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto) {
3566              return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto)other);
3567            } else {
3568              super.mergeFrom(other);
3569              return this;
3570            }
3571          }
3572          
3573          public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto other) {
3574            if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto.getDefaultInstance()) return this;
3575            if (other.hasReqInfo()) {
3576              mergeReqInfo(other.getReqInfo());
3577            }
3578            this.mergeUnknownFields(other.getUnknownFields());
3579            return this;
3580          }
3581          
3582          public final boolean isInitialized() {
3583            if (!hasReqInfo()) {
3584              
3585              return false;
3586            }
3587            if (!getReqInfo().isInitialized()) {
3588              
3589              return false;
3590            }
3591            return true;
3592          }
3593          
3594          public Builder mergeFrom(
3595              com.google.protobuf.CodedInputStream input,
3596              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
3597              throws java.io.IOException {
3598            com.google.protobuf.UnknownFieldSet.Builder unknownFields =
3599              com.google.protobuf.UnknownFieldSet.newBuilder(
3600                this.getUnknownFields());
3601            while (true) {
3602              int tag = input.readTag();
3603              switch (tag) {
3604                case 0:
3605                  this.setUnknownFields(unknownFields.build());
3606                  onChanged();
3607                  return this;
3608                default: {
3609                  if (!parseUnknownField(input, unknownFields,
3610                                         extensionRegistry, tag)) {
3611                    this.setUnknownFields(unknownFields.build());
3612                    onChanged();
3613                    return this;
3614                  }
3615                  break;
3616                }
3617                case 10: {
3618                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder subBuilder = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.newBuilder();
3619                  if (hasReqInfo()) {
3620                    subBuilder.mergeFrom(getReqInfo());
3621                  }
3622                  input.readMessage(subBuilder, extensionRegistry);
3623                  setReqInfo(subBuilder.buildPartial());
3624                  break;
3625                }
3626              }
3627            }
3628          }
3629          
3630          private int bitField0_;
3631          
3632          // required .hadoop.hdfs.RequestInfoProto reqInfo = 1;
3633          private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
3634          private com.google.protobuf.SingleFieldBuilder<
3635              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder> reqInfoBuilder_;
3636          public boolean hasReqInfo() {
3637            return ((bitField0_ & 0x00000001) == 0x00000001);
3638          }
3639          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo() {
3640            if (reqInfoBuilder_ == null) {
3641              return reqInfo_;
3642            } else {
3643              return reqInfoBuilder_.getMessage();
3644            }
3645          }
3646          public Builder setReqInfo(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto value) {
3647            if (reqInfoBuilder_ == null) {
3648              if (value == null) {
3649                throw new NullPointerException();
3650              }
3651              reqInfo_ = value;
3652              onChanged();
3653            } else {
3654              reqInfoBuilder_.setMessage(value);
3655            }
3656            bitField0_ |= 0x00000001;
3657            return this;
3658          }
3659          public Builder setReqInfo(
3660              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder builderForValue) {
3661            if (reqInfoBuilder_ == null) {
3662              reqInfo_ = builderForValue.build();
3663              onChanged();
3664            } else {
3665              reqInfoBuilder_.setMessage(builderForValue.build());
3666            }
3667            bitField0_ |= 0x00000001;
3668            return this;
3669          }
3670          public Builder mergeReqInfo(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto value) {
3671            if (reqInfoBuilder_ == null) {
3672              if (((bitField0_ & 0x00000001) == 0x00000001) &&
3673                  reqInfo_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance()) {
3674                reqInfo_ =
3675                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.newBuilder(reqInfo_).mergeFrom(value).buildPartial();
3676              } else {
3677                reqInfo_ = value;
3678              }
3679              onChanged();
3680            } else {
3681              reqInfoBuilder_.mergeFrom(value);
3682            }
3683            bitField0_ |= 0x00000001;
3684            return this;
3685          }
3686          public Builder clearReqInfo() {
3687            if (reqInfoBuilder_ == null) {
3688              reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
3689              onChanged();
3690            } else {
3691              reqInfoBuilder_.clear();
3692            }
3693            bitField0_ = (bitField0_ & ~0x00000001);
3694            return this;
3695          }
3696          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder getReqInfoBuilder() {
3697            bitField0_ |= 0x00000001;
3698            onChanged();
3699            return getReqInfoFieldBuilder().getBuilder();
3700          }
3701          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder() {
3702            if (reqInfoBuilder_ != null) {
3703              return reqInfoBuilder_.getMessageOrBuilder();
3704            } else {
3705              return reqInfo_;
3706            }
3707          }
3708          private com.google.protobuf.SingleFieldBuilder<
3709              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder> 
3710              getReqInfoFieldBuilder() {
3711            if (reqInfoBuilder_ == null) {
3712              reqInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder<
3713                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder>(
3714                      reqInfo_,
3715                      getParentForChildren(),
3716                      isClean());
3717              reqInfo_ = null;
3718            }
3719            return reqInfoBuilder_;
3720          }
3721          
3722          // @@protoc_insertion_point(builder_scope:hadoop.hdfs.HeartbeatRequestProto)
3723        }
3724        
3725        static {
3726          defaultInstance = new HeartbeatRequestProto(true);
3727          defaultInstance.initFields();
3728        }
3729        
3730        // @@protoc_insertion_point(class_scope:hadoop.hdfs.HeartbeatRequestProto)
3731      }
3732      
3733      public interface HeartbeatResponseProtoOrBuilder
3734          extends com.google.protobuf.MessageOrBuilder {
3735      }
3736      public static final class HeartbeatResponseProto extends
3737          com.google.protobuf.GeneratedMessage
3738          implements HeartbeatResponseProtoOrBuilder {
3739        // Use HeartbeatResponseProto.newBuilder() to construct.
3740        private HeartbeatResponseProto(Builder builder) {
3741          super(builder);
3742        }
3743        private HeartbeatResponseProto(boolean noInit) {}
3744        
3745        private static final HeartbeatResponseProto defaultInstance;
3746        public static HeartbeatResponseProto getDefaultInstance() {
3747          return defaultInstance;
3748        }
3749        
3750        public HeartbeatResponseProto getDefaultInstanceForType() {
3751          return defaultInstance;
3752        }
3753        
3754        public static final com.google.protobuf.Descriptors.Descriptor
3755            getDescriptor() {
3756          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_HeartbeatResponseProto_descriptor;
3757        }
3758        
3759        protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
3760            internalGetFieldAccessorTable() {
3761          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_HeartbeatResponseProto_fieldAccessorTable;
3762        }
3763        
3764        private void initFields() {
3765        }
3766        private byte memoizedIsInitialized = -1;
3767        public final boolean isInitialized() {
3768          byte isInitialized = memoizedIsInitialized;
3769          if (isInitialized != -1) return isInitialized == 1;
3770          
3771          memoizedIsInitialized = 1;
3772          return true;
3773        }
3774        
3775        public void writeTo(com.google.protobuf.CodedOutputStream output)
3776                            throws java.io.IOException {
3777          getSerializedSize();
3778          getUnknownFields().writeTo(output);
3779        }
3780        
3781        private int memoizedSerializedSize = -1;
3782        public int getSerializedSize() {
3783          int size = memoizedSerializedSize;
3784          if (size != -1) return size;
3785        
3786          size = 0;
3787          size += getUnknownFields().getSerializedSize();
3788          memoizedSerializedSize = size;
3789          return size;
3790        }
3791        
3792        private static final long serialVersionUID = 0L;
3793        @java.lang.Override
3794        protected java.lang.Object writeReplace()
3795            throws java.io.ObjectStreamException {
3796          return super.writeReplace();
3797        }
3798        
3799        @java.lang.Override
3800        public boolean equals(final java.lang.Object obj) {
3801          if (obj == this) {
3802           return true;
3803          }
3804          if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto)) {
3805            return super.equals(obj);
3806          }
3807          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto) obj;
3808          
3809          boolean result = true;
3810          result = result &&
3811              getUnknownFields().equals(other.getUnknownFields());
3812          return result;
3813        }
3814        
3815        @java.lang.Override
3816        public int hashCode() {
3817          int hash = 41;
3818          hash = (19 * hash) + getDescriptorForType().hashCode();
3819          hash = (29 * hash) + getUnknownFields().hashCode();
3820          return hash;
3821        }
3822        
3823        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto parseFrom(
3824            com.google.protobuf.ByteString data)
3825            throws com.google.protobuf.InvalidProtocolBufferException {
3826          return newBuilder().mergeFrom(data).buildParsed();
3827        }
3828        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto parseFrom(
3829            com.google.protobuf.ByteString data,
3830            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
3831            throws com.google.protobuf.InvalidProtocolBufferException {
3832          return newBuilder().mergeFrom(data, extensionRegistry)
3833                   .buildParsed();
3834        }
3835        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto parseFrom(byte[] data)
3836            throws com.google.protobuf.InvalidProtocolBufferException {
3837          return newBuilder().mergeFrom(data).buildParsed();
3838        }
3839        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto parseFrom(
3840            byte[] data,
3841            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
3842            throws com.google.protobuf.InvalidProtocolBufferException {
3843          return newBuilder().mergeFrom(data, extensionRegistry)
3844                   .buildParsed();
3845        }
3846        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto parseFrom(java.io.InputStream input)
3847            throws java.io.IOException {
3848          return newBuilder().mergeFrom(input).buildParsed();
3849        }
3850        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto parseFrom(
3851            java.io.InputStream input,
3852            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
3853            throws java.io.IOException {
3854          return newBuilder().mergeFrom(input, extensionRegistry)
3855                   .buildParsed();
3856        }
3857        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto parseDelimitedFrom(java.io.InputStream input)
3858            throws java.io.IOException {
3859          Builder builder = newBuilder();
3860          if (builder.mergeDelimitedFrom(input)) {
3861            return builder.buildParsed();
3862          } else {
3863            return null;
3864          }
3865        }
3866        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto parseDelimitedFrom(
3867            java.io.InputStream input,
3868            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
3869            throws java.io.IOException {
3870          Builder builder = newBuilder();
3871          if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
3872            return builder.buildParsed();
3873          } else {
3874            return null;
3875          }
3876        }
3877        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto parseFrom(
3878            com.google.protobuf.CodedInputStream input)
3879            throws java.io.IOException {
3880          return newBuilder().mergeFrom(input).buildParsed();
3881        }
3882        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto parseFrom(
3883            com.google.protobuf.CodedInputStream input,
3884            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
3885            throws java.io.IOException {
3886          return newBuilder().mergeFrom(input, extensionRegistry)
3887                   .buildParsed();
3888        }
3889        
3890        public static Builder newBuilder() { return Builder.create(); }
3891        public Builder newBuilderForType() { return newBuilder(); }
3892        public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto prototype) {
3893          return newBuilder().mergeFrom(prototype);
3894        }
3895        public Builder toBuilder() { return newBuilder(this); }
3896        
3897        @java.lang.Override
3898        protected Builder newBuilderForType(
3899            com.google.protobuf.GeneratedMessage.BuilderParent parent) {
3900          Builder builder = new Builder(parent);
3901          return builder;
3902        }
3903        public static final class Builder extends
3904            com.google.protobuf.GeneratedMessage.Builder<Builder>
3905           implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProtoOrBuilder {
3906          public static final com.google.protobuf.Descriptors.Descriptor
3907              getDescriptor() {
3908            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_HeartbeatResponseProto_descriptor;
3909          }
3910          
3911          protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
3912              internalGetFieldAccessorTable() {
3913            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_HeartbeatResponseProto_fieldAccessorTable;
3914          }
3915          
3916          // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto.newBuilder()
3917          private Builder() {
3918            maybeForceBuilderInitialization();
3919          }
3920          
3921          private Builder(BuilderParent parent) {
3922            super(parent);
3923            maybeForceBuilderInitialization();
3924          }
3925          private void maybeForceBuilderInitialization() {
3926            if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
3927            }
3928          }
3929          private static Builder create() {
3930            return new Builder();
3931          }
3932          
3933          public Builder clear() {
3934            super.clear();
3935            return this;
3936          }
3937          
3938          public Builder clone() {
3939            return create().mergeFrom(buildPartial());
3940          }
3941          
3942          public com.google.protobuf.Descriptors.Descriptor
3943              getDescriptorForType() {
3944            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto.getDescriptor();
3945          }
3946          
3947          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto getDefaultInstanceForType() {
3948            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto.getDefaultInstance();
3949          }
3950          
3951          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto build() {
3952            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto result = buildPartial();
3953            if (!result.isInitialized()) {
3954              throw newUninitializedMessageException(result);
3955            }
3956            return result;
3957          }
3958          
3959          private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto buildParsed()
3960              throws com.google.protobuf.InvalidProtocolBufferException {
3961            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto result = buildPartial();
3962            if (!result.isInitialized()) {
3963              throw newUninitializedMessageException(
3964                result).asInvalidProtocolBufferException();
3965            }
3966            return result;
3967          }
3968          
3969          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto buildPartial() {
3970            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto(this);
3971            onBuilt();
3972            return result;
3973          }
3974          
3975          public Builder mergeFrom(com.google.protobuf.Message other) {
3976            if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto) {
3977              return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto)other);
3978            } else {
3979              super.mergeFrom(other);
3980              return this;
3981            }
3982          }
3983          
3984          public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto other) {
3985            if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto.getDefaultInstance()) return this;
3986            this.mergeUnknownFields(other.getUnknownFields());
3987            return this;
3988          }
3989          
3990          public final boolean isInitialized() {
3991            return true;
3992          }
3993          
3994          public Builder mergeFrom(
3995              com.google.protobuf.CodedInputStream input,
3996              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
3997              throws java.io.IOException {
3998            com.google.protobuf.UnknownFieldSet.Builder unknownFields =
3999              com.google.protobuf.UnknownFieldSet.newBuilder(
4000                this.getUnknownFields());
4001            while (true) {
4002              int tag = input.readTag();
4003              switch (tag) {
4004                case 0:
4005                  this.setUnknownFields(unknownFields.build());
4006                  onChanged();
4007                  return this;
4008                default: {
4009                  if (!parseUnknownField(input, unknownFields,
4010                                         extensionRegistry, tag)) {
4011                    this.setUnknownFields(unknownFields.build());
4012                    onChanged();
4013                    return this;
4014                  }
4015                  break;
4016                }
4017              }
4018            }
4019          }
4020          
4021          
4022          // @@protoc_insertion_point(builder_scope:hadoop.hdfs.HeartbeatResponseProto)
4023        }
4024        
4025        static {
4026          defaultInstance = new HeartbeatResponseProto(true);
4027          defaultInstance.initFields();
4028        }
4029        
4030        // @@protoc_insertion_point(class_scope:hadoop.hdfs.HeartbeatResponseProto)
4031      }
4032      
4033      public interface StartLogSegmentRequestProtoOrBuilder
4034          extends com.google.protobuf.MessageOrBuilder {
4035        
4036        // required .hadoop.hdfs.RequestInfoProto reqInfo = 1;
4037        boolean hasReqInfo();
4038        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo();
4039        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder();
4040        
4041        // required uint64 txid = 2;
4042        boolean hasTxid();
4043        long getTxid();
4044      }
4045      public static final class StartLogSegmentRequestProto extends
4046          com.google.protobuf.GeneratedMessage
4047          implements StartLogSegmentRequestProtoOrBuilder {
4048        // Use StartLogSegmentRequestProto.newBuilder() to construct.
4049        private StartLogSegmentRequestProto(Builder builder) {
4050          super(builder);
4051        }
4052        private StartLogSegmentRequestProto(boolean noInit) {}
4053        
4054        private static final StartLogSegmentRequestProto defaultInstance;
4055        public static StartLogSegmentRequestProto getDefaultInstance() {
4056          return defaultInstance;
4057        }
4058        
4059        public StartLogSegmentRequestProto getDefaultInstanceForType() {
4060          return defaultInstance;
4061        }
4062        
4063        public static final com.google.protobuf.Descriptors.Descriptor
4064            getDescriptor() {
4065          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_StartLogSegmentRequestProto_descriptor;
4066        }
4067        
4068        protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
4069            internalGetFieldAccessorTable() {
4070          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_StartLogSegmentRequestProto_fieldAccessorTable;
4071        }
4072        
4073        private int bitField0_;
4074        // required .hadoop.hdfs.RequestInfoProto reqInfo = 1;
4075        public static final int REQINFO_FIELD_NUMBER = 1;
4076        private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto reqInfo_;
4077        public boolean hasReqInfo() {
4078          return ((bitField0_ & 0x00000001) == 0x00000001);
4079        }
4080        public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo() {
4081          return reqInfo_;
4082        }
4083        public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder() {
4084          return reqInfo_;
4085        }
4086        
4087        // required uint64 txid = 2;
4088        public static final int TXID_FIELD_NUMBER = 2;
4089        private long txid_;
4090        public boolean hasTxid() {
4091          return ((bitField0_ & 0x00000002) == 0x00000002);
4092        }
4093        public long getTxid() {
4094          return txid_;
4095        }
4096        
4097        private void initFields() {
4098          reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
4099          txid_ = 0L;
4100        }
4101        private byte memoizedIsInitialized = -1;
4102        public final boolean isInitialized() {
4103          byte isInitialized = memoizedIsInitialized;
4104          if (isInitialized != -1) return isInitialized == 1;
4105          
4106          if (!hasReqInfo()) {
4107            memoizedIsInitialized = 0;
4108            return false;
4109          }
4110          if (!hasTxid()) {
4111            memoizedIsInitialized = 0;
4112            return false;
4113          }
4114          if (!getReqInfo().isInitialized()) {
4115            memoizedIsInitialized = 0;
4116            return false;
4117          }
4118          memoizedIsInitialized = 1;
4119          return true;
4120        }
4121        
4122        public void writeTo(com.google.protobuf.CodedOutputStream output)
4123                            throws java.io.IOException {
4124          getSerializedSize();
4125          if (((bitField0_ & 0x00000001) == 0x00000001)) {
4126            output.writeMessage(1, reqInfo_);
4127          }
4128          if (((bitField0_ & 0x00000002) == 0x00000002)) {
4129            output.writeUInt64(2, txid_);
4130          }
4131          getUnknownFields().writeTo(output);
4132        }
4133        
4134        private int memoizedSerializedSize = -1;
4135        public int getSerializedSize() {
4136          int size = memoizedSerializedSize;
4137          if (size != -1) return size;
4138        
4139          size = 0;
4140          if (((bitField0_ & 0x00000001) == 0x00000001)) {
4141            size += com.google.protobuf.CodedOutputStream
4142              .computeMessageSize(1, reqInfo_);
4143          }
4144          if (((bitField0_ & 0x00000002) == 0x00000002)) {
4145            size += com.google.protobuf.CodedOutputStream
4146              .computeUInt64Size(2, txid_);
4147          }
4148          size += getUnknownFields().getSerializedSize();
4149          memoizedSerializedSize = size;
4150          return size;
4151        }
4152        
4153        private static final long serialVersionUID = 0L;
4154        @java.lang.Override
4155        protected java.lang.Object writeReplace()
4156            throws java.io.ObjectStreamException {
4157          return super.writeReplace();
4158        }
4159        
4160        @java.lang.Override
4161        public boolean equals(final java.lang.Object obj) {
4162          if (obj == this) {
4163           return true;
4164          }
4165          if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto)) {
4166            return super.equals(obj);
4167          }
4168          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto) obj;
4169          
4170          boolean result = true;
4171          result = result && (hasReqInfo() == other.hasReqInfo());
4172          if (hasReqInfo()) {
4173            result = result && getReqInfo()
4174                .equals(other.getReqInfo());
4175          }
4176          result = result && (hasTxid() == other.hasTxid());
4177          if (hasTxid()) {
4178            result = result && (getTxid()
4179                == other.getTxid());
4180          }
4181          result = result &&
4182              getUnknownFields().equals(other.getUnknownFields());
4183          return result;
4184        }
4185        
4186        @java.lang.Override
4187        public int hashCode() {
4188          int hash = 41;
4189          hash = (19 * hash) + getDescriptorForType().hashCode();
4190          if (hasReqInfo()) {
4191            hash = (37 * hash) + REQINFO_FIELD_NUMBER;
4192            hash = (53 * hash) + getReqInfo().hashCode();
4193          }
4194          if (hasTxid()) {
4195            hash = (37 * hash) + TXID_FIELD_NUMBER;
4196            hash = (53 * hash) + hashLong(getTxid());
4197          }
4198          hash = (29 * hash) + getUnknownFields().hashCode();
4199          return hash;
4200        }
4201        
4202        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto parseFrom(
4203            com.google.protobuf.ByteString data)
4204            throws com.google.protobuf.InvalidProtocolBufferException {
4205          return newBuilder().mergeFrom(data).buildParsed();
4206        }
4207        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto parseFrom(
4208            com.google.protobuf.ByteString data,
4209            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
4210            throws com.google.protobuf.InvalidProtocolBufferException {
4211          return newBuilder().mergeFrom(data, extensionRegistry)
4212                   .buildParsed();
4213        }
4214        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto parseFrom(byte[] data)
4215            throws com.google.protobuf.InvalidProtocolBufferException {
4216          return newBuilder().mergeFrom(data).buildParsed();
4217        }
4218        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto parseFrom(
4219            byte[] data,
4220            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
4221            throws com.google.protobuf.InvalidProtocolBufferException {
4222          return newBuilder().mergeFrom(data, extensionRegistry)
4223                   .buildParsed();
4224        }
4225        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto parseFrom(java.io.InputStream input)
4226            throws java.io.IOException {
4227          return newBuilder().mergeFrom(input).buildParsed();
4228        }
4229        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto parseFrom(
4230            java.io.InputStream input,
4231            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
4232            throws java.io.IOException {
4233          return newBuilder().mergeFrom(input, extensionRegistry)
4234                   .buildParsed();
4235        }
4236        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto parseDelimitedFrom(java.io.InputStream input)
4237            throws java.io.IOException {
4238          Builder builder = newBuilder();
4239          if (builder.mergeDelimitedFrom(input)) {
4240            return builder.buildParsed();
4241          } else {
4242            return null;
4243          }
4244        }
4245        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto parseDelimitedFrom(
4246            java.io.InputStream input,
4247            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
4248            throws java.io.IOException {
4249          Builder builder = newBuilder();
4250          if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
4251            return builder.buildParsed();
4252          } else {
4253            return null;
4254          }
4255        }
4256        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto parseFrom(
4257            com.google.protobuf.CodedInputStream input)
4258            throws java.io.IOException {
4259          return newBuilder().mergeFrom(input).buildParsed();
4260        }
4261        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto parseFrom(
4262            com.google.protobuf.CodedInputStream input,
4263            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
4264            throws java.io.IOException {
4265          return newBuilder().mergeFrom(input, extensionRegistry)
4266                   .buildParsed();
4267        }
4268        
4269        public static Builder newBuilder() { return Builder.create(); }
4270        public Builder newBuilderForType() { return newBuilder(); }
4271        public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto prototype) {
4272          return newBuilder().mergeFrom(prototype);
4273        }
4274        public Builder toBuilder() { return newBuilder(this); }
4275        
4276        @java.lang.Override
4277        protected Builder newBuilderForType(
4278            com.google.protobuf.GeneratedMessage.BuilderParent parent) {
4279          Builder builder = new Builder(parent);
4280          return builder;
4281        }
4282        public static final class Builder extends
4283            com.google.protobuf.GeneratedMessage.Builder<Builder>
4284           implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProtoOrBuilder {
4285          public static final com.google.protobuf.Descriptors.Descriptor
4286              getDescriptor() {
4287            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_StartLogSegmentRequestProto_descriptor;
4288          }
4289          
4290          protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
4291              internalGetFieldAccessorTable() {
4292            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_StartLogSegmentRequestProto_fieldAccessorTable;
4293          }
4294          
4295          // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto.newBuilder()
4296          private Builder() {
4297            maybeForceBuilderInitialization();
4298          }
4299          
4300          private Builder(BuilderParent parent) {
4301            super(parent);
4302            maybeForceBuilderInitialization();
4303          }
4304          private void maybeForceBuilderInitialization() {
4305            if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
4306              getReqInfoFieldBuilder();
4307            }
4308          }
4309          private static Builder create() {
4310            return new Builder();
4311          }
4312          
4313          public Builder clear() {
4314            super.clear();
4315            if (reqInfoBuilder_ == null) {
4316              reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
4317            } else {
4318              reqInfoBuilder_.clear();
4319            }
4320            bitField0_ = (bitField0_ & ~0x00000001);
4321            txid_ = 0L;
4322            bitField0_ = (bitField0_ & ~0x00000002);
4323            return this;
4324          }
4325          
4326          public Builder clone() {
4327            return create().mergeFrom(buildPartial());
4328          }
4329          
4330          public com.google.protobuf.Descriptors.Descriptor
4331              getDescriptorForType() {
4332            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto.getDescriptor();
4333          }
4334          
4335          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto getDefaultInstanceForType() {
4336            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto.getDefaultInstance();
4337          }
4338          
4339          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto build() {
4340            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto result = buildPartial();
4341            if (!result.isInitialized()) {
4342              throw newUninitializedMessageException(result);
4343            }
4344            return result;
4345          }
4346          
4347          private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto buildParsed()
4348              throws com.google.protobuf.InvalidProtocolBufferException {
4349            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto result = buildPartial();
4350            if (!result.isInitialized()) {
4351              throw newUninitializedMessageException(
4352                result).asInvalidProtocolBufferException();
4353            }
4354            return result;
4355          }
4356          
4357          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto buildPartial() {
4358            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto(this);
4359            int from_bitField0_ = bitField0_;
4360            int to_bitField0_ = 0;
4361            if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
4362              to_bitField0_ |= 0x00000001;
4363            }
4364            if (reqInfoBuilder_ == null) {
4365              result.reqInfo_ = reqInfo_;
4366            } else {
4367              result.reqInfo_ = reqInfoBuilder_.build();
4368            }
4369            if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
4370              to_bitField0_ |= 0x00000002;
4371            }
4372            result.txid_ = txid_;
4373            result.bitField0_ = to_bitField0_;
4374            onBuilt();
4375            return result;
4376          }
4377          
4378          public Builder mergeFrom(com.google.protobuf.Message other) {
4379            if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto) {
4380              return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto)other);
4381            } else {
4382              super.mergeFrom(other);
4383              return this;
4384            }
4385          }
4386          
4387          public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto other) {
4388            if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto.getDefaultInstance()) return this;
4389            if (other.hasReqInfo()) {
4390              mergeReqInfo(other.getReqInfo());
4391            }
4392            if (other.hasTxid()) {
4393              setTxid(other.getTxid());
4394            }
4395            this.mergeUnknownFields(other.getUnknownFields());
4396            return this;
4397          }
4398          
4399          public final boolean isInitialized() {
4400            if (!hasReqInfo()) {
4401              
4402              return false;
4403            }
4404            if (!hasTxid()) {
4405              
4406              return false;
4407            }
4408            if (!getReqInfo().isInitialized()) {
4409              
4410              return false;
4411            }
4412            return true;
4413          }
4414          
4415          public Builder mergeFrom(
4416              com.google.protobuf.CodedInputStream input,
4417              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
4418              throws java.io.IOException {
4419            com.google.protobuf.UnknownFieldSet.Builder unknownFields =
4420              com.google.protobuf.UnknownFieldSet.newBuilder(
4421                this.getUnknownFields());
4422            while (true) {
4423              int tag = input.readTag();
4424              switch (tag) {
4425                case 0:
4426                  this.setUnknownFields(unknownFields.build());
4427                  onChanged();
4428                  return this;
4429                default: {
4430                  if (!parseUnknownField(input, unknownFields,
4431                                         extensionRegistry, tag)) {
4432                    this.setUnknownFields(unknownFields.build());
4433                    onChanged();
4434                    return this;
4435                  }
4436                  break;
4437                }
4438                case 10: {
4439                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder subBuilder = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.newBuilder();
4440                  if (hasReqInfo()) {
4441                    subBuilder.mergeFrom(getReqInfo());
4442                  }
4443                  input.readMessage(subBuilder, extensionRegistry);
4444                  setReqInfo(subBuilder.buildPartial());
4445                  break;
4446                }
4447                case 16: {
4448                  bitField0_ |= 0x00000002;
4449                  txid_ = input.readUInt64();
4450                  break;
4451                }
4452              }
4453            }
4454          }
4455          
4456          private int bitField0_;
4457          
4458          // required .hadoop.hdfs.RequestInfoProto reqInfo = 1;
4459          private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
4460          private com.google.protobuf.SingleFieldBuilder<
4461              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder> reqInfoBuilder_;
4462          public boolean hasReqInfo() {
4463            return ((bitField0_ & 0x00000001) == 0x00000001);
4464          }
4465          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo() {
4466            if (reqInfoBuilder_ == null) {
4467              return reqInfo_;
4468            } else {
4469              return reqInfoBuilder_.getMessage();
4470            }
4471          }
4472          public Builder setReqInfo(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto value) {
4473            if (reqInfoBuilder_ == null) {
4474              if (value == null) {
4475                throw new NullPointerException();
4476              }
4477              reqInfo_ = value;
4478              onChanged();
4479            } else {
4480              reqInfoBuilder_.setMessage(value);
4481            }
4482            bitField0_ |= 0x00000001;
4483            return this;
4484          }
4485          public Builder setReqInfo(
4486              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder builderForValue) {
4487            if (reqInfoBuilder_ == null) {
4488              reqInfo_ = builderForValue.build();
4489              onChanged();
4490            } else {
4491              reqInfoBuilder_.setMessage(builderForValue.build());
4492            }
4493            bitField0_ |= 0x00000001;
4494            return this;
4495          }
4496          public Builder mergeReqInfo(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto value) {
4497            if (reqInfoBuilder_ == null) {
4498              if (((bitField0_ & 0x00000001) == 0x00000001) &&
4499                  reqInfo_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance()) {
4500                reqInfo_ =
4501                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.newBuilder(reqInfo_).mergeFrom(value).buildPartial();
4502              } else {
4503                reqInfo_ = value;
4504              }
4505              onChanged();
4506            } else {
4507              reqInfoBuilder_.mergeFrom(value);
4508            }
4509            bitField0_ |= 0x00000001;
4510            return this;
4511          }
4512          public Builder clearReqInfo() {
4513            if (reqInfoBuilder_ == null) {
4514              reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
4515              onChanged();
4516            } else {
4517              reqInfoBuilder_.clear();
4518            }
4519            bitField0_ = (bitField0_ & ~0x00000001);
4520            return this;
4521          }
4522          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder getReqInfoBuilder() {
4523            bitField0_ |= 0x00000001;
4524            onChanged();
4525            return getReqInfoFieldBuilder().getBuilder();
4526          }
4527          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder() {
4528            if (reqInfoBuilder_ != null) {
4529              return reqInfoBuilder_.getMessageOrBuilder();
4530            } else {
4531              return reqInfo_;
4532            }
4533          }
4534          private com.google.protobuf.SingleFieldBuilder<
4535              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder> 
4536              getReqInfoFieldBuilder() {
4537            if (reqInfoBuilder_ == null) {
4538              reqInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder<
4539                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder>(
4540                      reqInfo_,
4541                      getParentForChildren(),
4542                      isClean());
4543              reqInfo_ = null;
4544            }
4545            return reqInfoBuilder_;
4546          }
4547          
4548          // required uint64 txid = 2;
4549          private long txid_ ;
4550          public boolean hasTxid() {
4551            return ((bitField0_ & 0x00000002) == 0x00000002);
4552          }
4553          public long getTxid() {
4554            return txid_;
4555          }
4556          public Builder setTxid(long value) {
4557            bitField0_ |= 0x00000002;
4558            txid_ = value;
4559            onChanged();
4560            return this;
4561          }
4562          public Builder clearTxid() {
4563            bitField0_ = (bitField0_ & ~0x00000002);
4564            txid_ = 0L;
4565            onChanged();
4566            return this;
4567          }
4568          
4569          // @@protoc_insertion_point(builder_scope:hadoop.hdfs.StartLogSegmentRequestProto)
4570        }
4571        
4572        static {
4573          defaultInstance = new StartLogSegmentRequestProto(true);
4574          defaultInstance.initFields();
4575        }
4576        
4577        // @@protoc_insertion_point(class_scope:hadoop.hdfs.StartLogSegmentRequestProto)
4578      }
4579      
4580      public interface StartLogSegmentResponseProtoOrBuilder
4581          extends com.google.protobuf.MessageOrBuilder {
4582      }
4583      public static final class StartLogSegmentResponseProto extends
4584          com.google.protobuf.GeneratedMessage
4585          implements StartLogSegmentResponseProtoOrBuilder {
4586        // Use StartLogSegmentResponseProto.newBuilder() to construct.
4587        private StartLogSegmentResponseProto(Builder builder) {
4588          super(builder);
4589        }
4590        private StartLogSegmentResponseProto(boolean noInit) {}
4591        
4592        private static final StartLogSegmentResponseProto defaultInstance;
4593        public static StartLogSegmentResponseProto getDefaultInstance() {
4594          return defaultInstance;
4595        }
4596        
4597        public StartLogSegmentResponseProto getDefaultInstanceForType() {
4598          return defaultInstance;
4599        }
4600        
4601        public static final com.google.protobuf.Descriptors.Descriptor
4602            getDescriptor() {
4603          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_StartLogSegmentResponseProto_descriptor;
4604        }
4605        
4606        protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
4607            internalGetFieldAccessorTable() {
4608          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_StartLogSegmentResponseProto_fieldAccessorTable;
4609        }
4610        
4611        private void initFields() {
4612        }
4613        private byte memoizedIsInitialized = -1;
4614        public final boolean isInitialized() {
4615          byte isInitialized = memoizedIsInitialized;
4616          if (isInitialized != -1) return isInitialized == 1;
4617          
4618          memoizedIsInitialized = 1;
4619          return true;
4620        }
4621        
4622        public void writeTo(com.google.protobuf.CodedOutputStream output)
4623                            throws java.io.IOException {
4624          getSerializedSize();
4625          getUnknownFields().writeTo(output);
4626        }
4627        
4628        private int memoizedSerializedSize = -1;
4629        public int getSerializedSize() {
4630          int size = memoizedSerializedSize;
4631          if (size != -1) return size;
4632        
4633          size = 0;
4634          size += getUnknownFields().getSerializedSize();
4635          memoizedSerializedSize = size;
4636          return size;
4637        }
4638        
4639        private static final long serialVersionUID = 0L;
4640        @java.lang.Override
4641        protected java.lang.Object writeReplace()
4642            throws java.io.ObjectStreamException {
4643          return super.writeReplace();
4644        }
4645        
4646        @java.lang.Override
4647        public boolean equals(final java.lang.Object obj) {
4648          if (obj == this) {
4649           return true;
4650          }
4651          if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto)) {
4652            return super.equals(obj);
4653          }
4654          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto) obj;
4655          
4656          boolean result = true;
4657          result = result &&
4658              getUnknownFields().equals(other.getUnknownFields());
4659          return result;
4660        }
4661        
4662        @java.lang.Override
4663        public int hashCode() {
4664          int hash = 41;
4665          hash = (19 * hash) + getDescriptorForType().hashCode();
4666          hash = (29 * hash) + getUnknownFields().hashCode();
4667          return hash;
4668        }
4669        
4670        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto parseFrom(
4671            com.google.protobuf.ByteString data)
4672            throws com.google.protobuf.InvalidProtocolBufferException {
4673          return newBuilder().mergeFrom(data).buildParsed();
4674        }
4675        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto parseFrom(
4676            com.google.protobuf.ByteString data,
4677            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
4678            throws com.google.protobuf.InvalidProtocolBufferException {
4679          return newBuilder().mergeFrom(data, extensionRegistry)
4680                   .buildParsed();
4681        }
4682        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto parseFrom(byte[] data)
4683            throws com.google.protobuf.InvalidProtocolBufferException {
4684          return newBuilder().mergeFrom(data).buildParsed();
4685        }
4686        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto parseFrom(
4687            byte[] data,
4688            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
4689            throws com.google.protobuf.InvalidProtocolBufferException {
4690          return newBuilder().mergeFrom(data, extensionRegistry)
4691                   .buildParsed();
4692        }
4693        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto parseFrom(java.io.InputStream input)
4694            throws java.io.IOException {
4695          return newBuilder().mergeFrom(input).buildParsed();
4696        }
4697        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto parseFrom(
4698            java.io.InputStream input,
4699            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
4700            throws java.io.IOException {
4701          return newBuilder().mergeFrom(input, extensionRegistry)
4702                   .buildParsed();
4703        }
4704        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto parseDelimitedFrom(java.io.InputStream input)
4705            throws java.io.IOException {
4706          Builder builder = newBuilder();
4707          if (builder.mergeDelimitedFrom(input)) {
4708            return builder.buildParsed();
4709          } else {
4710            return null;
4711          }
4712        }
4713        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto parseDelimitedFrom(
4714            java.io.InputStream input,
4715            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
4716            throws java.io.IOException {
4717          Builder builder = newBuilder();
4718          if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
4719            return builder.buildParsed();
4720          } else {
4721            return null;
4722          }
4723        }
4724        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto parseFrom(
4725            com.google.protobuf.CodedInputStream input)
4726            throws java.io.IOException {
4727          return newBuilder().mergeFrom(input).buildParsed();
4728        }
4729        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto parseFrom(
4730            com.google.protobuf.CodedInputStream input,
4731            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
4732            throws java.io.IOException {
4733          return newBuilder().mergeFrom(input, extensionRegistry)
4734                   .buildParsed();
4735        }
4736        
4737        public static Builder newBuilder() { return Builder.create(); }
4738        public Builder newBuilderForType() { return newBuilder(); }
4739        public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto prototype) {
4740          return newBuilder().mergeFrom(prototype);
4741        }
4742        public Builder toBuilder() { return newBuilder(this); }
4743        
4744        @java.lang.Override
4745        protected Builder newBuilderForType(
4746            com.google.protobuf.GeneratedMessage.BuilderParent parent) {
4747          Builder builder = new Builder(parent);
4748          return builder;
4749        }
4750        public static final class Builder extends
4751            com.google.protobuf.GeneratedMessage.Builder<Builder>
4752           implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProtoOrBuilder {
4753          public static final com.google.protobuf.Descriptors.Descriptor
4754              getDescriptor() {
4755            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_StartLogSegmentResponseProto_descriptor;
4756          }
4757          
4758          protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
4759              internalGetFieldAccessorTable() {
4760            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_StartLogSegmentResponseProto_fieldAccessorTable;
4761          }
4762          
4763          // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto.newBuilder()
4764          private Builder() {
4765            maybeForceBuilderInitialization();
4766          }
4767          
4768          private Builder(BuilderParent parent) {
4769            super(parent);
4770            maybeForceBuilderInitialization();
4771          }
4772          private void maybeForceBuilderInitialization() {
4773            if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
4774            }
4775          }
4776          private static Builder create() {
4777            return new Builder();
4778          }
4779          
4780          public Builder clear() {
4781            super.clear();
4782            return this;
4783          }
4784          
4785          public Builder clone() {
4786            return create().mergeFrom(buildPartial());
4787          }
4788          
4789          public com.google.protobuf.Descriptors.Descriptor
4790              getDescriptorForType() {
4791            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto.getDescriptor();
4792          }
4793          
4794          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto getDefaultInstanceForType() {
4795            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto.getDefaultInstance();
4796          }
4797          
4798          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto build() {
4799            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto result = buildPartial();
4800            if (!result.isInitialized()) {
4801              throw newUninitializedMessageException(result);
4802            }
4803            return result;
4804          }
4805          
4806          private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto buildParsed()
4807              throws com.google.protobuf.InvalidProtocolBufferException {
4808            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto result = buildPartial();
4809            if (!result.isInitialized()) {
4810              throw newUninitializedMessageException(
4811                result).asInvalidProtocolBufferException();
4812            }
4813            return result;
4814          }
4815          
4816          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto buildPartial() {
4817            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto(this);
4818            onBuilt();
4819            return result;
4820          }
4821          
4822          public Builder mergeFrom(com.google.protobuf.Message other) {
4823            if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto) {
4824              return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto)other);
4825            } else {
4826              super.mergeFrom(other);
4827              return this;
4828            }
4829          }
4830          
4831          public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto other) {
4832            if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto.getDefaultInstance()) return this;
4833            this.mergeUnknownFields(other.getUnknownFields());
4834            return this;
4835          }
4836          
4837          public final boolean isInitialized() {
4838            return true;
4839          }
4840          
4841          public Builder mergeFrom(
4842              com.google.protobuf.CodedInputStream input,
4843              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
4844              throws java.io.IOException {
4845            com.google.protobuf.UnknownFieldSet.Builder unknownFields =
4846              com.google.protobuf.UnknownFieldSet.newBuilder(
4847                this.getUnknownFields());
4848            while (true) {
4849              int tag = input.readTag();
4850              switch (tag) {
4851                case 0:
4852                  this.setUnknownFields(unknownFields.build());
4853                  onChanged();
4854                  return this;
4855                default: {
4856                  if (!parseUnknownField(input, unknownFields,
4857                                         extensionRegistry, tag)) {
4858                    this.setUnknownFields(unknownFields.build());
4859                    onChanged();
4860                    return this;
4861                  }
4862                  break;
4863                }
4864              }
4865            }
4866          }
4867          
4868          
4869          // @@protoc_insertion_point(builder_scope:hadoop.hdfs.StartLogSegmentResponseProto)
4870        }
4871        
4872        static {
4873          defaultInstance = new StartLogSegmentResponseProto(true);
4874          defaultInstance.initFields();
4875        }
4876        
4877        // @@protoc_insertion_point(class_scope:hadoop.hdfs.StartLogSegmentResponseProto)
4878      }
4879      
4880      public interface FinalizeLogSegmentRequestProtoOrBuilder
4881          extends com.google.protobuf.MessageOrBuilder {
4882        
4883        // required .hadoop.hdfs.RequestInfoProto reqInfo = 1;
4884        boolean hasReqInfo();
4885        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo();
4886        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder();
4887        
4888        // required uint64 startTxId = 2;
4889        boolean hasStartTxId();
4890        long getStartTxId();
4891        
4892        // required uint64 endTxId = 3;
4893        boolean hasEndTxId();
4894        long getEndTxId();
4895      }
4896      public static final class FinalizeLogSegmentRequestProto extends
4897          com.google.protobuf.GeneratedMessage
4898          implements FinalizeLogSegmentRequestProtoOrBuilder {
4899        // Use FinalizeLogSegmentRequestProto.newBuilder() to construct.
4900        private FinalizeLogSegmentRequestProto(Builder builder) {
4901          super(builder);
4902        }
4903        private FinalizeLogSegmentRequestProto(boolean noInit) {}
4904        
4905        private static final FinalizeLogSegmentRequestProto defaultInstance;
4906        public static FinalizeLogSegmentRequestProto getDefaultInstance() {
4907          return defaultInstance;
4908        }
4909        
4910        public FinalizeLogSegmentRequestProto getDefaultInstanceForType() {
4911          return defaultInstance;
4912        }
4913        
4914        public static final com.google.protobuf.Descriptors.Descriptor
4915            getDescriptor() {
4916          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_FinalizeLogSegmentRequestProto_descriptor;
4917        }
4918        
4919        protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
4920            internalGetFieldAccessorTable() {
4921          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_FinalizeLogSegmentRequestProto_fieldAccessorTable;
4922        }
4923        
4924        private int bitField0_;
4925        // required .hadoop.hdfs.RequestInfoProto reqInfo = 1;
4926        public static final int REQINFO_FIELD_NUMBER = 1;
4927        private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto reqInfo_;
4928        public boolean hasReqInfo() {
4929          return ((bitField0_ & 0x00000001) == 0x00000001);
4930        }
4931        public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo() {
4932          return reqInfo_;
4933        }
4934        public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder() {
4935          return reqInfo_;
4936        }
4937        
4938        // required uint64 startTxId = 2;
4939        public static final int STARTTXID_FIELD_NUMBER = 2;
4940        private long startTxId_;
4941        public boolean hasStartTxId() {
4942          return ((bitField0_ & 0x00000002) == 0x00000002);
4943        }
4944        public long getStartTxId() {
4945          return startTxId_;
4946        }
4947        
4948        // required uint64 endTxId = 3;
4949        public static final int ENDTXID_FIELD_NUMBER = 3;
4950        private long endTxId_;
4951        public boolean hasEndTxId() {
4952          return ((bitField0_ & 0x00000004) == 0x00000004);
4953        }
4954        public long getEndTxId() {
4955          return endTxId_;
4956        }
4957        
4958        private void initFields() {
4959          reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
4960          startTxId_ = 0L;
4961          endTxId_ = 0L;
4962        }
4963        private byte memoizedIsInitialized = -1;
4964        public final boolean isInitialized() {
4965          byte isInitialized = memoizedIsInitialized;
4966          if (isInitialized != -1) return isInitialized == 1;
4967          
4968          if (!hasReqInfo()) {
4969            memoizedIsInitialized = 0;
4970            return false;
4971          }
4972          if (!hasStartTxId()) {
4973            memoizedIsInitialized = 0;
4974            return false;
4975          }
4976          if (!hasEndTxId()) {
4977            memoizedIsInitialized = 0;
4978            return false;
4979          }
4980          if (!getReqInfo().isInitialized()) {
4981            memoizedIsInitialized = 0;
4982            return false;
4983          }
4984          memoizedIsInitialized = 1;
4985          return true;
4986        }
4987        
4988        public void writeTo(com.google.protobuf.CodedOutputStream output)
4989                            throws java.io.IOException {
4990          getSerializedSize();
4991          if (((bitField0_ & 0x00000001) == 0x00000001)) {
4992            output.writeMessage(1, reqInfo_);
4993          }
4994          if (((bitField0_ & 0x00000002) == 0x00000002)) {
4995            output.writeUInt64(2, startTxId_);
4996          }
4997          if (((bitField0_ & 0x00000004) == 0x00000004)) {
4998            output.writeUInt64(3, endTxId_);
4999          }
5000          getUnknownFields().writeTo(output);
5001        }
5002        
5003        private int memoizedSerializedSize = -1;
5004        public int getSerializedSize() {
5005          int size = memoizedSerializedSize;
5006          if (size != -1) return size;
5007        
5008          size = 0;
5009          if (((bitField0_ & 0x00000001) == 0x00000001)) {
5010            size += com.google.protobuf.CodedOutputStream
5011              .computeMessageSize(1, reqInfo_);
5012          }
5013          if (((bitField0_ & 0x00000002) == 0x00000002)) {
5014            size += com.google.protobuf.CodedOutputStream
5015              .computeUInt64Size(2, startTxId_);
5016          }
5017          if (((bitField0_ & 0x00000004) == 0x00000004)) {
5018            size += com.google.protobuf.CodedOutputStream
5019              .computeUInt64Size(3, endTxId_);
5020          }
5021          size += getUnknownFields().getSerializedSize();
5022          memoizedSerializedSize = size;
5023          return size;
5024        }
5025        
5026        private static final long serialVersionUID = 0L;
5027        @java.lang.Override
5028        protected java.lang.Object writeReplace()
5029            throws java.io.ObjectStreamException {
5030          return super.writeReplace();
5031        }
5032        
5033        @java.lang.Override
5034        public boolean equals(final java.lang.Object obj) {
5035          if (obj == this) {
5036           return true;
5037          }
5038          if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto)) {
5039            return super.equals(obj);
5040          }
5041          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto) obj;
5042          
5043          boolean result = true;
5044          result = result && (hasReqInfo() == other.hasReqInfo());
5045          if (hasReqInfo()) {
5046            result = result && getReqInfo()
5047                .equals(other.getReqInfo());
5048          }
5049          result = result && (hasStartTxId() == other.hasStartTxId());
5050          if (hasStartTxId()) {
5051            result = result && (getStartTxId()
5052                == other.getStartTxId());
5053          }
5054          result = result && (hasEndTxId() == other.hasEndTxId());
5055          if (hasEndTxId()) {
5056            result = result && (getEndTxId()
5057                == other.getEndTxId());
5058          }
5059          result = result &&
5060              getUnknownFields().equals(other.getUnknownFields());
5061          return result;
5062        }
5063        
5064        @java.lang.Override
5065        public int hashCode() {
5066          int hash = 41;
5067          hash = (19 * hash) + getDescriptorForType().hashCode();
5068          if (hasReqInfo()) {
5069            hash = (37 * hash) + REQINFO_FIELD_NUMBER;
5070            hash = (53 * hash) + getReqInfo().hashCode();
5071          }
5072          if (hasStartTxId()) {
5073            hash = (37 * hash) + STARTTXID_FIELD_NUMBER;
5074            hash = (53 * hash) + hashLong(getStartTxId());
5075          }
5076          if (hasEndTxId()) {
5077            hash = (37 * hash) + ENDTXID_FIELD_NUMBER;
5078            hash = (53 * hash) + hashLong(getEndTxId());
5079          }
5080          hash = (29 * hash) + getUnknownFields().hashCode();
5081          return hash;
5082        }
5083        
5084        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto parseFrom(
5085            com.google.protobuf.ByteString data)
5086            throws com.google.protobuf.InvalidProtocolBufferException {
5087          return newBuilder().mergeFrom(data).buildParsed();
5088        }
5089        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto parseFrom(
5090            com.google.protobuf.ByteString data,
5091            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
5092            throws com.google.protobuf.InvalidProtocolBufferException {
5093          return newBuilder().mergeFrom(data, extensionRegistry)
5094                   .buildParsed();
5095        }
5096        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto parseFrom(byte[] data)
5097            throws com.google.protobuf.InvalidProtocolBufferException {
5098          return newBuilder().mergeFrom(data).buildParsed();
5099        }
5100        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto parseFrom(
5101            byte[] data,
5102            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
5103            throws com.google.protobuf.InvalidProtocolBufferException {
5104          return newBuilder().mergeFrom(data, extensionRegistry)
5105                   .buildParsed();
5106        }
5107        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto parseFrom(java.io.InputStream input)
5108            throws java.io.IOException {
5109          return newBuilder().mergeFrom(input).buildParsed();
5110        }
5111        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto parseFrom(
5112            java.io.InputStream input,
5113            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
5114            throws java.io.IOException {
5115          return newBuilder().mergeFrom(input, extensionRegistry)
5116                   .buildParsed();
5117        }
5118        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto parseDelimitedFrom(java.io.InputStream input)
5119            throws java.io.IOException {
5120          Builder builder = newBuilder();
5121          if (builder.mergeDelimitedFrom(input)) {
5122            return builder.buildParsed();
5123          } else {
5124            return null;
5125          }
5126        }
5127        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto parseDelimitedFrom(
5128            java.io.InputStream input,
5129            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
5130            throws java.io.IOException {
5131          Builder builder = newBuilder();
5132          if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
5133            return builder.buildParsed();
5134          } else {
5135            return null;
5136          }
5137        }
5138        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto parseFrom(
5139            com.google.protobuf.CodedInputStream input)
5140            throws java.io.IOException {
5141          return newBuilder().mergeFrom(input).buildParsed();
5142        }
5143        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto parseFrom(
5144            com.google.protobuf.CodedInputStream input,
5145            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
5146            throws java.io.IOException {
5147          return newBuilder().mergeFrom(input, extensionRegistry)
5148                   .buildParsed();
5149        }
5150        
5151        public static Builder newBuilder() { return Builder.create(); }
5152        public Builder newBuilderForType() { return newBuilder(); }
5153        public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto prototype) {
5154          return newBuilder().mergeFrom(prototype);
5155        }
5156        public Builder toBuilder() { return newBuilder(this); }
5157        
5158        @java.lang.Override
5159        protected Builder newBuilderForType(
5160            com.google.protobuf.GeneratedMessage.BuilderParent parent) {
5161          Builder builder = new Builder(parent);
5162          return builder;
5163        }
5164        public static final class Builder extends
5165            com.google.protobuf.GeneratedMessage.Builder<Builder>
5166           implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProtoOrBuilder {
5167          public static final com.google.protobuf.Descriptors.Descriptor
5168              getDescriptor() {
5169            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_FinalizeLogSegmentRequestProto_descriptor;
5170          }
5171          
5172          protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
5173              internalGetFieldAccessorTable() {
5174            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_FinalizeLogSegmentRequestProto_fieldAccessorTable;
5175          }
5176          
5177          // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto.newBuilder()
5178          private Builder() {
5179            maybeForceBuilderInitialization();
5180          }
5181          
5182          private Builder(BuilderParent parent) {
5183            super(parent);
5184            maybeForceBuilderInitialization();
5185          }
5186          private void maybeForceBuilderInitialization() {
5187            if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
5188              getReqInfoFieldBuilder();
5189            }
5190          }
5191          private static Builder create() {
5192            return new Builder();
5193          }
5194          
5195          public Builder clear() {
5196            super.clear();
5197            if (reqInfoBuilder_ == null) {
5198              reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
5199            } else {
5200              reqInfoBuilder_.clear();
5201            }
5202            bitField0_ = (bitField0_ & ~0x00000001);
5203            startTxId_ = 0L;
5204            bitField0_ = (bitField0_ & ~0x00000002);
5205            endTxId_ = 0L;
5206            bitField0_ = (bitField0_ & ~0x00000004);
5207            return this;
5208          }
5209          
5210          public Builder clone() {
5211            return create().mergeFrom(buildPartial());
5212          }
5213          
5214          public com.google.protobuf.Descriptors.Descriptor
5215              getDescriptorForType() {
5216            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto.getDescriptor();
5217          }
5218          
5219          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto getDefaultInstanceForType() {
5220            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto.getDefaultInstance();
5221          }
5222          
5223          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto build() {
5224            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto result = buildPartial();
5225            if (!result.isInitialized()) {
5226              throw newUninitializedMessageException(result);
5227            }
5228            return result;
5229          }
5230          
5231          private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto buildParsed()
5232              throws com.google.protobuf.InvalidProtocolBufferException {
5233            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto result = buildPartial();
5234            if (!result.isInitialized()) {
5235              throw newUninitializedMessageException(
5236                result).asInvalidProtocolBufferException();
5237            }
5238            return result;
5239          }
5240          
5241          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto buildPartial() {
5242            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto(this);
5243            int from_bitField0_ = bitField0_;
5244            int to_bitField0_ = 0;
5245            if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
5246              to_bitField0_ |= 0x00000001;
5247            }
5248            if (reqInfoBuilder_ == null) {
5249              result.reqInfo_ = reqInfo_;
5250            } else {
5251              result.reqInfo_ = reqInfoBuilder_.build();
5252            }
5253            if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
5254              to_bitField0_ |= 0x00000002;
5255            }
5256            result.startTxId_ = startTxId_;
5257            if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
5258              to_bitField0_ |= 0x00000004;
5259            }
5260            result.endTxId_ = endTxId_;
5261            result.bitField0_ = to_bitField0_;
5262            onBuilt();
5263            return result;
5264          }
5265          
5266          public Builder mergeFrom(com.google.protobuf.Message other) {
5267            if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto) {
5268              return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto)other);
5269            } else {
5270              super.mergeFrom(other);
5271              return this;
5272            }
5273          }
5274          
5275          public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto other) {
5276            if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto.getDefaultInstance()) return this;
5277            if (other.hasReqInfo()) {
5278              mergeReqInfo(other.getReqInfo());
5279            }
5280            if (other.hasStartTxId()) {
5281              setStartTxId(other.getStartTxId());
5282            }
5283            if (other.hasEndTxId()) {
5284              setEndTxId(other.getEndTxId());
5285            }
5286            this.mergeUnknownFields(other.getUnknownFields());
5287            return this;
5288          }
5289          
5290          public final boolean isInitialized() {
5291            if (!hasReqInfo()) {
5292              
5293              return false;
5294            }
5295            if (!hasStartTxId()) {
5296              
5297              return false;
5298            }
5299            if (!hasEndTxId()) {
5300              
5301              return false;
5302            }
5303            if (!getReqInfo().isInitialized()) {
5304              
5305              return false;
5306            }
5307            return true;
5308          }
5309          
5310          public Builder mergeFrom(
5311              com.google.protobuf.CodedInputStream input,
5312              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
5313              throws java.io.IOException {
5314            com.google.protobuf.UnknownFieldSet.Builder unknownFields =
5315              com.google.protobuf.UnknownFieldSet.newBuilder(
5316                this.getUnknownFields());
5317            while (true) {
5318              int tag = input.readTag();
5319              switch (tag) {
5320                case 0:
5321                  this.setUnknownFields(unknownFields.build());
5322                  onChanged();
5323                  return this;
5324                default: {
5325                  if (!parseUnknownField(input, unknownFields,
5326                                         extensionRegistry, tag)) {
5327                    this.setUnknownFields(unknownFields.build());
5328                    onChanged();
5329                    return this;
5330                  }
5331                  break;
5332                }
5333                case 10: {
5334                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder subBuilder = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.newBuilder();
5335                  if (hasReqInfo()) {
5336                    subBuilder.mergeFrom(getReqInfo());
5337                  }
5338                  input.readMessage(subBuilder, extensionRegistry);
5339                  setReqInfo(subBuilder.buildPartial());
5340                  break;
5341                }
5342                case 16: {
5343                  bitField0_ |= 0x00000002;
5344                  startTxId_ = input.readUInt64();
5345                  break;
5346                }
5347                case 24: {
5348                  bitField0_ |= 0x00000004;
5349                  endTxId_ = input.readUInt64();
5350                  break;
5351                }
5352              }
5353            }
5354          }
5355          
5356          private int bitField0_;
5357          
5358          // required .hadoop.hdfs.RequestInfoProto reqInfo = 1;
5359          private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
5360          private com.google.protobuf.SingleFieldBuilder<
5361              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder> reqInfoBuilder_;
5362          public boolean hasReqInfo() {
5363            return ((bitField0_ & 0x00000001) == 0x00000001);
5364          }
5365          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo() {
5366            if (reqInfoBuilder_ == null) {
5367              return reqInfo_;
5368            } else {
5369              return reqInfoBuilder_.getMessage();
5370            }
5371          }
5372          public Builder setReqInfo(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto value) {
5373            if (reqInfoBuilder_ == null) {
5374              if (value == null) {
5375                throw new NullPointerException();
5376              }
5377              reqInfo_ = value;
5378              onChanged();
5379            } else {
5380              reqInfoBuilder_.setMessage(value);
5381            }
5382            bitField0_ |= 0x00000001;
5383            return this;
5384          }
5385          public Builder setReqInfo(
5386              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder builderForValue) {
5387            if (reqInfoBuilder_ == null) {
5388              reqInfo_ = builderForValue.build();
5389              onChanged();
5390            } else {
5391              reqInfoBuilder_.setMessage(builderForValue.build());
5392            }
5393            bitField0_ |= 0x00000001;
5394            return this;
5395          }
5396          public Builder mergeReqInfo(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto value) {
5397            if (reqInfoBuilder_ == null) {
5398              if (((bitField0_ & 0x00000001) == 0x00000001) &&
5399                  reqInfo_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance()) {
5400                reqInfo_ =
5401                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.newBuilder(reqInfo_).mergeFrom(value).buildPartial();
5402              } else {
5403                reqInfo_ = value;
5404              }
5405              onChanged();
5406            } else {
5407              reqInfoBuilder_.mergeFrom(value);
5408            }
5409            bitField0_ |= 0x00000001;
5410            return this;
5411          }
5412          public Builder clearReqInfo() {
5413            if (reqInfoBuilder_ == null) {
5414              reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
5415              onChanged();
5416            } else {
5417              reqInfoBuilder_.clear();
5418            }
5419            bitField0_ = (bitField0_ & ~0x00000001);
5420            return this;
5421          }
5422          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder getReqInfoBuilder() {
5423            bitField0_ |= 0x00000001;
5424            onChanged();
5425            return getReqInfoFieldBuilder().getBuilder();
5426          }
5427          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder() {
5428            if (reqInfoBuilder_ != null) {
5429              return reqInfoBuilder_.getMessageOrBuilder();
5430            } else {
5431              return reqInfo_;
5432            }
5433          }
5434          private com.google.protobuf.SingleFieldBuilder<
5435              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder> 
5436              getReqInfoFieldBuilder() {
5437            if (reqInfoBuilder_ == null) {
5438              reqInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder<
5439                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder>(
5440                      reqInfo_,
5441                      getParentForChildren(),
5442                      isClean());
5443              reqInfo_ = null;
5444            }
5445            return reqInfoBuilder_;
5446          }
5447          
5448          // required uint64 startTxId = 2;
5449          private long startTxId_ ;
5450          public boolean hasStartTxId() {
5451            return ((bitField0_ & 0x00000002) == 0x00000002);
5452          }
5453          public long getStartTxId() {
5454            return startTxId_;
5455          }
5456          public Builder setStartTxId(long value) {
5457            bitField0_ |= 0x00000002;
5458            startTxId_ = value;
5459            onChanged();
5460            return this;
5461          }
5462          public Builder clearStartTxId() {
5463            bitField0_ = (bitField0_ & ~0x00000002);
5464            startTxId_ = 0L;
5465            onChanged();
5466            return this;
5467          }
5468          
5469          // required uint64 endTxId = 3;
5470          private long endTxId_ ;
5471          public boolean hasEndTxId() {
5472            return ((bitField0_ & 0x00000004) == 0x00000004);
5473          }
5474          public long getEndTxId() {
5475            return endTxId_;
5476          }
5477          public Builder setEndTxId(long value) {
5478            bitField0_ |= 0x00000004;
5479            endTxId_ = value;
5480            onChanged();
5481            return this;
5482          }
5483          public Builder clearEndTxId() {
5484            bitField0_ = (bitField0_ & ~0x00000004);
5485            endTxId_ = 0L;
5486            onChanged();
5487            return this;
5488          }
5489          
5490          // @@protoc_insertion_point(builder_scope:hadoop.hdfs.FinalizeLogSegmentRequestProto)
5491        }
5492        
5493        static {
5494          defaultInstance = new FinalizeLogSegmentRequestProto(true);
5495          defaultInstance.initFields();
5496        }
5497        
5498        // @@protoc_insertion_point(class_scope:hadoop.hdfs.FinalizeLogSegmentRequestProto)
5499      }
5500      
5501      public interface FinalizeLogSegmentResponseProtoOrBuilder
5502          extends com.google.protobuf.MessageOrBuilder {
5503      }
5504      public static final class FinalizeLogSegmentResponseProto extends
5505          com.google.protobuf.GeneratedMessage
5506          implements FinalizeLogSegmentResponseProtoOrBuilder {
5507        // Use FinalizeLogSegmentResponseProto.newBuilder() to construct.
5508        private FinalizeLogSegmentResponseProto(Builder builder) {
5509          super(builder);
5510        }
5511        private FinalizeLogSegmentResponseProto(boolean noInit) {}
5512        
5513        private static final FinalizeLogSegmentResponseProto defaultInstance;
5514        public static FinalizeLogSegmentResponseProto getDefaultInstance() {
5515          return defaultInstance;
5516        }
5517        
5518        public FinalizeLogSegmentResponseProto getDefaultInstanceForType() {
5519          return defaultInstance;
5520        }
5521        
5522        public static final com.google.protobuf.Descriptors.Descriptor
5523            getDescriptor() {
5524          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_FinalizeLogSegmentResponseProto_descriptor;
5525        }
5526        
5527        protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
5528            internalGetFieldAccessorTable() {
5529          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_FinalizeLogSegmentResponseProto_fieldAccessorTable;
5530        }
5531        
5532        private void initFields() {
5533        }
5534        private byte memoizedIsInitialized = -1;
5535        public final boolean isInitialized() {
5536          byte isInitialized = memoizedIsInitialized;
5537          if (isInitialized != -1) return isInitialized == 1;
5538          
5539          memoizedIsInitialized = 1;
5540          return true;
5541        }
5542        
5543        public void writeTo(com.google.protobuf.CodedOutputStream output)
5544                            throws java.io.IOException {
5545          getSerializedSize();
5546          getUnknownFields().writeTo(output);
5547        }
5548        
5549        private int memoizedSerializedSize = -1;
5550        public int getSerializedSize() {
5551          int size = memoizedSerializedSize;
5552          if (size != -1) return size;
5553        
5554          size = 0;
5555          size += getUnknownFields().getSerializedSize();
5556          memoizedSerializedSize = size;
5557          return size;
5558        }
5559        
5560        private static final long serialVersionUID = 0L;
5561        @java.lang.Override
5562        protected java.lang.Object writeReplace()
5563            throws java.io.ObjectStreamException {
5564          return super.writeReplace();
5565        }
5566        
5567        @java.lang.Override
5568        public boolean equals(final java.lang.Object obj) {
5569          if (obj == this) {
5570           return true;
5571          }
5572          if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto)) {
5573            return super.equals(obj);
5574          }
5575          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto) obj;
5576          
5577          boolean result = true;
5578          result = result &&
5579              getUnknownFields().equals(other.getUnknownFields());
5580          return result;
5581        }
5582        
5583        @java.lang.Override
5584        public int hashCode() {
5585          int hash = 41;
5586          hash = (19 * hash) + getDescriptorForType().hashCode();
5587          hash = (29 * hash) + getUnknownFields().hashCode();
5588          return hash;
5589        }
5590        
5591        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto parseFrom(
5592            com.google.protobuf.ByteString data)
5593            throws com.google.protobuf.InvalidProtocolBufferException {
5594          return newBuilder().mergeFrom(data).buildParsed();
5595        }
5596        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto parseFrom(
5597            com.google.protobuf.ByteString data,
5598            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
5599            throws com.google.protobuf.InvalidProtocolBufferException {
5600          return newBuilder().mergeFrom(data, extensionRegistry)
5601                   .buildParsed();
5602        }
5603        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto parseFrom(byte[] data)
5604            throws com.google.protobuf.InvalidProtocolBufferException {
5605          return newBuilder().mergeFrom(data).buildParsed();
5606        }
5607        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto parseFrom(
5608            byte[] data,
5609            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
5610            throws com.google.protobuf.InvalidProtocolBufferException {
5611          return newBuilder().mergeFrom(data, extensionRegistry)
5612                   .buildParsed();
5613        }
5614        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto parseFrom(java.io.InputStream input)
5615            throws java.io.IOException {
5616          return newBuilder().mergeFrom(input).buildParsed();
5617        }
5618        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto parseFrom(
5619            java.io.InputStream input,
5620            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
5621            throws java.io.IOException {
5622          return newBuilder().mergeFrom(input, extensionRegistry)
5623                   .buildParsed();
5624        }
5625        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto parseDelimitedFrom(java.io.InputStream input)
5626            throws java.io.IOException {
5627          Builder builder = newBuilder();
5628          if (builder.mergeDelimitedFrom(input)) {
5629            return builder.buildParsed();
5630          } else {
5631            return null;
5632          }
5633        }
5634        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto parseDelimitedFrom(
5635            java.io.InputStream input,
5636            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
5637            throws java.io.IOException {
5638          Builder builder = newBuilder();
5639          if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
5640            return builder.buildParsed();
5641          } else {
5642            return null;
5643          }
5644        }
5645        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto parseFrom(
5646            com.google.protobuf.CodedInputStream input)
5647            throws java.io.IOException {
5648          return newBuilder().mergeFrom(input).buildParsed();
5649        }
5650        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto parseFrom(
5651            com.google.protobuf.CodedInputStream input,
5652            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
5653            throws java.io.IOException {
5654          return newBuilder().mergeFrom(input, extensionRegistry)
5655                   .buildParsed();
5656        }
5657        
5658        public static Builder newBuilder() { return Builder.create(); }
5659        public Builder newBuilderForType() { return newBuilder(); }
5660        public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto prototype) {
5661          return newBuilder().mergeFrom(prototype);
5662        }
5663        public Builder toBuilder() { return newBuilder(this); }
5664        
5665        @java.lang.Override
5666        protected Builder newBuilderForType(
5667            com.google.protobuf.GeneratedMessage.BuilderParent parent) {
5668          Builder builder = new Builder(parent);
5669          return builder;
5670        }
5671        public static final class Builder extends
5672            com.google.protobuf.GeneratedMessage.Builder<Builder>
5673           implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProtoOrBuilder {
5674          public static final com.google.protobuf.Descriptors.Descriptor
5675              getDescriptor() {
5676            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_FinalizeLogSegmentResponseProto_descriptor;
5677          }
5678          
5679          protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
5680              internalGetFieldAccessorTable() {
5681            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_FinalizeLogSegmentResponseProto_fieldAccessorTable;
5682          }
5683          
5684          // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto.newBuilder()
5685          private Builder() {
5686            maybeForceBuilderInitialization();
5687          }
5688          
5689          private Builder(BuilderParent parent) {
5690            super(parent);
5691            maybeForceBuilderInitialization();
5692          }
5693          private void maybeForceBuilderInitialization() {
5694            if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
5695            }
5696          }
5697          private static Builder create() {
5698            return new Builder();
5699          }
5700          
5701          public Builder clear() {
5702            super.clear();
5703            return this;
5704          }
5705          
5706          public Builder clone() {
5707            return create().mergeFrom(buildPartial());
5708          }
5709          
5710          public com.google.protobuf.Descriptors.Descriptor
5711              getDescriptorForType() {
5712            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto.getDescriptor();
5713          }
5714          
5715          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto getDefaultInstanceForType() {
5716            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto.getDefaultInstance();
5717          }
5718          
5719          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto build() {
5720            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto result = buildPartial();
5721            if (!result.isInitialized()) {
5722              throw newUninitializedMessageException(result);
5723            }
5724            return result;
5725          }
5726          
5727          private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto buildParsed()
5728              throws com.google.protobuf.InvalidProtocolBufferException {
5729            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto result = buildPartial();
5730            if (!result.isInitialized()) {
5731              throw newUninitializedMessageException(
5732                result).asInvalidProtocolBufferException();
5733            }
5734            return result;
5735          }
5736          
5737          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto buildPartial() {
5738            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto(this);
5739            onBuilt();
5740            return result;
5741          }
5742          
5743          public Builder mergeFrom(com.google.protobuf.Message other) {
5744            if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto) {
5745              return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto)other);
5746            } else {
5747              super.mergeFrom(other);
5748              return this;
5749            }
5750          }
5751          
5752          public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto other) {
5753            if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto.getDefaultInstance()) return this;
5754            this.mergeUnknownFields(other.getUnknownFields());
5755            return this;
5756          }
5757          
5758          public final boolean isInitialized() {
5759            return true;
5760          }
5761          
5762          public Builder mergeFrom(
5763              com.google.protobuf.CodedInputStream input,
5764              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
5765              throws java.io.IOException {
5766            com.google.protobuf.UnknownFieldSet.Builder unknownFields =
5767              com.google.protobuf.UnknownFieldSet.newBuilder(
5768                this.getUnknownFields());
5769            while (true) {
5770              int tag = input.readTag();
5771              switch (tag) {
5772                case 0:
5773                  this.setUnknownFields(unknownFields.build());
5774                  onChanged();
5775                  return this;
5776                default: {
5777                  if (!parseUnknownField(input, unknownFields,
5778                                         extensionRegistry, tag)) {
5779                    this.setUnknownFields(unknownFields.build());
5780                    onChanged();
5781                    return this;
5782                  }
5783                  break;
5784                }
5785              }
5786            }
5787          }
5788          
5789          
5790          // @@protoc_insertion_point(builder_scope:hadoop.hdfs.FinalizeLogSegmentResponseProto)
5791        }
5792        
5793        static {
5794          defaultInstance = new FinalizeLogSegmentResponseProto(true);
5795          defaultInstance.initFields();
5796        }
5797        
5798        // @@protoc_insertion_point(class_scope:hadoop.hdfs.FinalizeLogSegmentResponseProto)
5799      }
5800      
5801      public interface PurgeLogsRequestProtoOrBuilder
5802          extends com.google.protobuf.MessageOrBuilder {
5803        
5804        // required .hadoop.hdfs.RequestInfoProto reqInfo = 1;
5805        boolean hasReqInfo();
5806        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo();
5807        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder();
5808        
5809        // required uint64 minTxIdToKeep = 2;
5810        boolean hasMinTxIdToKeep();
5811        long getMinTxIdToKeep();
5812      }
5813      public static final class PurgeLogsRequestProto extends
5814          com.google.protobuf.GeneratedMessage
5815          implements PurgeLogsRequestProtoOrBuilder {
5816        // Use PurgeLogsRequestProto.newBuilder() to construct.
5817        private PurgeLogsRequestProto(Builder builder) {
5818          super(builder);
5819        }
5820        private PurgeLogsRequestProto(boolean noInit) {}
5821        
5822        private static final PurgeLogsRequestProto defaultInstance;
5823        public static PurgeLogsRequestProto getDefaultInstance() {
5824          return defaultInstance;
5825        }
5826        
5827        public PurgeLogsRequestProto getDefaultInstanceForType() {
5828          return defaultInstance;
5829        }
5830        
5831        public static final com.google.protobuf.Descriptors.Descriptor
5832            getDescriptor() {
5833          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_PurgeLogsRequestProto_descriptor;
5834        }
5835        
5836        protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
5837            internalGetFieldAccessorTable() {
5838          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_PurgeLogsRequestProto_fieldAccessorTable;
5839        }
5840        
5841        private int bitField0_;
5842        // required .hadoop.hdfs.RequestInfoProto reqInfo = 1;
5843        public static final int REQINFO_FIELD_NUMBER = 1;
5844        private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto reqInfo_;
5845        public boolean hasReqInfo() {
5846          return ((bitField0_ & 0x00000001) == 0x00000001);
5847        }
5848        public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo() {
5849          return reqInfo_;
5850        }
5851        public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder() {
5852          return reqInfo_;
5853        }
5854        
5855        // required uint64 minTxIdToKeep = 2;
5856        public static final int MINTXIDTOKEEP_FIELD_NUMBER = 2;
5857        private long minTxIdToKeep_;
5858        public boolean hasMinTxIdToKeep() {
5859          return ((bitField0_ & 0x00000002) == 0x00000002);
5860        }
5861        public long getMinTxIdToKeep() {
5862          return minTxIdToKeep_;
5863        }
5864        
5865        private void initFields() {
5866          reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
5867          minTxIdToKeep_ = 0L;
5868        }
5869        private byte memoizedIsInitialized = -1;
5870        public final boolean isInitialized() {
5871          byte isInitialized = memoizedIsInitialized;
5872          if (isInitialized != -1) return isInitialized == 1;
5873          
5874          if (!hasReqInfo()) {
5875            memoizedIsInitialized = 0;
5876            return false;
5877          }
5878          if (!hasMinTxIdToKeep()) {
5879            memoizedIsInitialized = 0;
5880            return false;
5881          }
5882          if (!getReqInfo().isInitialized()) {
5883            memoizedIsInitialized = 0;
5884            return false;
5885          }
5886          memoizedIsInitialized = 1;
5887          return true;
5888        }
5889        
5890        public void writeTo(com.google.protobuf.CodedOutputStream output)
5891                            throws java.io.IOException {
5892          getSerializedSize();
5893          if (((bitField0_ & 0x00000001) == 0x00000001)) {
5894            output.writeMessage(1, reqInfo_);
5895          }
5896          if (((bitField0_ & 0x00000002) == 0x00000002)) {
5897            output.writeUInt64(2, minTxIdToKeep_);
5898          }
5899          getUnknownFields().writeTo(output);
5900        }
5901        
5902        private int memoizedSerializedSize = -1;
5903        public int getSerializedSize() {
5904          int size = memoizedSerializedSize;
5905          if (size != -1) return size;
5906        
5907          size = 0;
5908          if (((bitField0_ & 0x00000001) == 0x00000001)) {
5909            size += com.google.protobuf.CodedOutputStream
5910              .computeMessageSize(1, reqInfo_);
5911          }
5912          if (((bitField0_ & 0x00000002) == 0x00000002)) {
5913            size += com.google.protobuf.CodedOutputStream
5914              .computeUInt64Size(2, minTxIdToKeep_);
5915          }
5916          size += getUnknownFields().getSerializedSize();
5917          memoizedSerializedSize = size;
5918          return size;
5919        }
5920        
5921        private static final long serialVersionUID = 0L;
5922        @java.lang.Override
5923        protected java.lang.Object writeReplace()
5924            throws java.io.ObjectStreamException {
5925          return super.writeReplace();
5926        }
5927        
5928        @java.lang.Override
5929        public boolean equals(final java.lang.Object obj) {
5930          if (obj == this) {
5931           return true;
5932          }
5933          if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto)) {
5934            return super.equals(obj);
5935          }
5936          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto) obj;
5937          
5938          boolean result = true;
5939          result = result && (hasReqInfo() == other.hasReqInfo());
5940          if (hasReqInfo()) {
5941            result = result && getReqInfo()
5942                .equals(other.getReqInfo());
5943          }
5944          result = result && (hasMinTxIdToKeep() == other.hasMinTxIdToKeep());
5945          if (hasMinTxIdToKeep()) {
5946            result = result && (getMinTxIdToKeep()
5947                == other.getMinTxIdToKeep());
5948          }
5949          result = result &&
5950              getUnknownFields().equals(other.getUnknownFields());
5951          return result;
5952        }
5953        
5954        @java.lang.Override
5955        public int hashCode() {
5956          int hash = 41;
5957          hash = (19 * hash) + getDescriptorForType().hashCode();
5958          if (hasReqInfo()) {
5959            hash = (37 * hash) + REQINFO_FIELD_NUMBER;
5960            hash = (53 * hash) + getReqInfo().hashCode();
5961          }
5962          if (hasMinTxIdToKeep()) {
5963            hash = (37 * hash) + MINTXIDTOKEEP_FIELD_NUMBER;
5964            hash = (53 * hash) + hashLong(getMinTxIdToKeep());
5965          }
5966          hash = (29 * hash) + getUnknownFields().hashCode();
5967          return hash;
5968        }
5969        
5970        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto parseFrom(
5971            com.google.protobuf.ByteString data)
5972            throws com.google.protobuf.InvalidProtocolBufferException {
5973          return newBuilder().mergeFrom(data).buildParsed();
5974        }
5975        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto parseFrom(
5976            com.google.protobuf.ByteString data,
5977            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
5978            throws com.google.protobuf.InvalidProtocolBufferException {
5979          return newBuilder().mergeFrom(data, extensionRegistry)
5980                   .buildParsed();
5981        }
5982        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto parseFrom(byte[] data)
5983            throws com.google.protobuf.InvalidProtocolBufferException {
5984          return newBuilder().mergeFrom(data).buildParsed();
5985        }
5986        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto parseFrom(
5987            byte[] data,
5988            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
5989            throws com.google.protobuf.InvalidProtocolBufferException {
5990          return newBuilder().mergeFrom(data, extensionRegistry)
5991                   .buildParsed();
5992        }
5993        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto parseFrom(java.io.InputStream input)
5994            throws java.io.IOException {
5995          return newBuilder().mergeFrom(input).buildParsed();
5996        }
5997        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto parseFrom(
5998            java.io.InputStream input,
5999            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
6000            throws java.io.IOException {
6001          return newBuilder().mergeFrom(input, extensionRegistry)
6002                   .buildParsed();
6003        }
6004        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto parseDelimitedFrom(java.io.InputStream input)
6005            throws java.io.IOException {
6006          Builder builder = newBuilder();
6007          if (builder.mergeDelimitedFrom(input)) {
6008            return builder.buildParsed();
6009          } else {
6010            return null;
6011          }
6012        }
6013        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto parseDelimitedFrom(
6014            java.io.InputStream input,
6015            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
6016            throws java.io.IOException {
6017          Builder builder = newBuilder();
6018          if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
6019            return builder.buildParsed();
6020          } else {
6021            return null;
6022          }
6023        }
6024        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto parseFrom(
6025            com.google.protobuf.CodedInputStream input)
6026            throws java.io.IOException {
6027          return newBuilder().mergeFrom(input).buildParsed();
6028        }
6029        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto parseFrom(
6030            com.google.protobuf.CodedInputStream input,
6031            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
6032            throws java.io.IOException {
6033          return newBuilder().mergeFrom(input, extensionRegistry)
6034                   .buildParsed();
6035        }
6036        
6037        public static Builder newBuilder() { return Builder.create(); }
6038        public Builder newBuilderForType() { return newBuilder(); }
6039        public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto prototype) {
6040          return newBuilder().mergeFrom(prototype);
6041        }
6042        public Builder toBuilder() { return newBuilder(this); }
6043        
6044        @java.lang.Override
6045        protected Builder newBuilderForType(
6046            com.google.protobuf.GeneratedMessage.BuilderParent parent) {
6047          Builder builder = new Builder(parent);
6048          return builder;
6049        }
6050        public static final class Builder extends
6051            com.google.protobuf.GeneratedMessage.Builder<Builder>
6052           implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProtoOrBuilder {
6053          public static final com.google.protobuf.Descriptors.Descriptor
6054              getDescriptor() {
6055            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_PurgeLogsRequestProto_descriptor;
6056          }
6057          
6058          protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
6059              internalGetFieldAccessorTable() {
6060            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_PurgeLogsRequestProto_fieldAccessorTable;
6061          }
6062          
6063          // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto.newBuilder()
6064          private Builder() {
6065            maybeForceBuilderInitialization();
6066          }
6067          
6068          private Builder(BuilderParent parent) {
6069            super(parent);
6070            maybeForceBuilderInitialization();
6071          }
6072          private void maybeForceBuilderInitialization() {
6073            if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
6074              getReqInfoFieldBuilder();
6075            }
6076          }
6077          private static Builder create() {
6078            return new Builder();
6079          }
6080          
6081          public Builder clear() {
6082            super.clear();
6083            if (reqInfoBuilder_ == null) {
6084              reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
6085            } else {
6086              reqInfoBuilder_.clear();
6087            }
6088            bitField0_ = (bitField0_ & ~0x00000001);
6089            minTxIdToKeep_ = 0L;
6090            bitField0_ = (bitField0_ & ~0x00000002);
6091            return this;
6092          }
6093          
6094          public Builder clone() {
6095            return create().mergeFrom(buildPartial());
6096          }
6097          
6098          public com.google.protobuf.Descriptors.Descriptor
6099              getDescriptorForType() {
6100            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto.getDescriptor();
6101          }
6102          
6103          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto getDefaultInstanceForType() {
6104            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto.getDefaultInstance();
6105          }
6106          
6107          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto build() {
6108            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto result = buildPartial();
6109            if (!result.isInitialized()) {
6110              throw newUninitializedMessageException(result);
6111            }
6112            return result;
6113          }
6114          
6115          private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto buildParsed()
6116              throws com.google.protobuf.InvalidProtocolBufferException {
6117            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto result = buildPartial();
6118            if (!result.isInitialized()) {
6119              throw newUninitializedMessageException(
6120                result).asInvalidProtocolBufferException();
6121            }
6122            return result;
6123          }
6124          
6125          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto buildPartial() {
6126            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto(this);
6127            int from_bitField0_ = bitField0_;
6128            int to_bitField0_ = 0;
6129            if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
6130              to_bitField0_ |= 0x00000001;
6131            }
6132            if (reqInfoBuilder_ == null) {
6133              result.reqInfo_ = reqInfo_;
6134            } else {
6135              result.reqInfo_ = reqInfoBuilder_.build();
6136            }
6137            if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
6138              to_bitField0_ |= 0x00000002;
6139            }
6140            result.minTxIdToKeep_ = minTxIdToKeep_;
6141            result.bitField0_ = to_bitField0_;
6142            onBuilt();
6143            return result;
6144          }
6145          
6146          public Builder mergeFrom(com.google.protobuf.Message other) {
6147            if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto) {
6148              return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto)other);
6149            } else {
6150              super.mergeFrom(other);
6151              return this;
6152            }
6153          }
6154          
6155          public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto other) {
6156            if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto.getDefaultInstance()) return this;
6157            if (other.hasReqInfo()) {
6158              mergeReqInfo(other.getReqInfo());
6159            }
6160            if (other.hasMinTxIdToKeep()) {
6161              setMinTxIdToKeep(other.getMinTxIdToKeep());
6162            }
6163            this.mergeUnknownFields(other.getUnknownFields());
6164            return this;
6165          }
6166          
6167          public final boolean isInitialized() {
6168            if (!hasReqInfo()) {
6169              
6170              return false;
6171            }
6172            if (!hasMinTxIdToKeep()) {
6173              
6174              return false;
6175            }
6176            if (!getReqInfo().isInitialized()) {
6177              
6178              return false;
6179            }
6180            return true;
6181          }
6182          
6183          public Builder mergeFrom(
6184              com.google.protobuf.CodedInputStream input,
6185              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
6186              throws java.io.IOException {
6187            com.google.protobuf.UnknownFieldSet.Builder unknownFields =
6188              com.google.protobuf.UnknownFieldSet.newBuilder(
6189                this.getUnknownFields());
6190            while (true) {
6191              int tag = input.readTag();
6192              switch (tag) {
6193                case 0:
6194                  this.setUnknownFields(unknownFields.build());
6195                  onChanged();
6196                  return this;
6197                default: {
6198                  if (!parseUnknownField(input, unknownFields,
6199                                         extensionRegistry, tag)) {
6200                    this.setUnknownFields(unknownFields.build());
6201                    onChanged();
6202                    return this;
6203                  }
6204                  break;
6205                }
6206                case 10: {
6207                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder subBuilder = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.newBuilder();
6208                  if (hasReqInfo()) {
6209                    subBuilder.mergeFrom(getReqInfo());
6210                  }
6211                  input.readMessage(subBuilder, extensionRegistry);
6212                  setReqInfo(subBuilder.buildPartial());
6213                  break;
6214                }
6215                case 16: {
6216                  bitField0_ |= 0x00000002;
6217                  minTxIdToKeep_ = input.readUInt64();
6218                  break;
6219                }
6220              }
6221            }
6222          }
6223          
6224          private int bitField0_;
6225          
6226          // required .hadoop.hdfs.RequestInfoProto reqInfo = 1;
6227          private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
6228          private com.google.protobuf.SingleFieldBuilder<
6229              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder> reqInfoBuilder_;
6230          public boolean hasReqInfo() {
6231            return ((bitField0_ & 0x00000001) == 0x00000001);
6232          }
6233          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo() {
6234            if (reqInfoBuilder_ == null) {
6235              return reqInfo_;
6236            } else {
6237              return reqInfoBuilder_.getMessage();
6238            }
6239          }
6240          public Builder setReqInfo(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto value) {
6241            if (reqInfoBuilder_ == null) {
6242              if (value == null) {
6243                throw new NullPointerException();
6244              }
6245              reqInfo_ = value;
6246              onChanged();
6247            } else {
6248              reqInfoBuilder_.setMessage(value);
6249            }
6250            bitField0_ |= 0x00000001;
6251            return this;
6252          }
6253          public Builder setReqInfo(
6254              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder builderForValue) {
6255            if (reqInfoBuilder_ == null) {
6256              reqInfo_ = builderForValue.build();
6257              onChanged();
6258            } else {
6259              reqInfoBuilder_.setMessage(builderForValue.build());
6260            }
6261            bitField0_ |= 0x00000001;
6262            return this;
6263          }
6264          public Builder mergeReqInfo(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto value) {
6265            if (reqInfoBuilder_ == null) {
6266              if (((bitField0_ & 0x00000001) == 0x00000001) &&
6267                  reqInfo_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance()) {
6268                reqInfo_ =
6269                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.newBuilder(reqInfo_).mergeFrom(value).buildPartial();
6270              } else {
6271                reqInfo_ = value;
6272              }
6273              onChanged();
6274            } else {
6275              reqInfoBuilder_.mergeFrom(value);
6276            }
6277            bitField0_ |= 0x00000001;
6278            return this;
6279          }
6280          public Builder clearReqInfo() {
6281            if (reqInfoBuilder_ == null) {
6282              reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
6283              onChanged();
6284            } else {
6285              reqInfoBuilder_.clear();
6286            }
6287            bitField0_ = (bitField0_ & ~0x00000001);
6288            return this;
6289          }
6290          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder getReqInfoBuilder() {
6291            bitField0_ |= 0x00000001;
6292            onChanged();
6293            return getReqInfoFieldBuilder().getBuilder();
6294          }
6295          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder() {
6296            if (reqInfoBuilder_ != null) {
6297              return reqInfoBuilder_.getMessageOrBuilder();
6298            } else {
6299              return reqInfo_;
6300            }
6301          }
6302          private com.google.protobuf.SingleFieldBuilder<
6303              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder> 
6304              getReqInfoFieldBuilder() {
6305            if (reqInfoBuilder_ == null) {
6306              reqInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder<
6307                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder>(
6308                      reqInfo_,
6309                      getParentForChildren(),
6310                      isClean());
6311              reqInfo_ = null;
6312            }
6313            return reqInfoBuilder_;
6314          }
6315          
6316          // required uint64 minTxIdToKeep = 2;
6317          private long minTxIdToKeep_ ;
6318          public boolean hasMinTxIdToKeep() {
6319            return ((bitField0_ & 0x00000002) == 0x00000002);
6320          }
6321          public long getMinTxIdToKeep() {
6322            return minTxIdToKeep_;
6323          }
6324          public Builder setMinTxIdToKeep(long value) {
6325            bitField0_ |= 0x00000002;
6326            minTxIdToKeep_ = value;
6327            onChanged();
6328            return this;
6329          }
6330          public Builder clearMinTxIdToKeep() {
6331            bitField0_ = (bitField0_ & ~0x00000002);
6332            minTxIdToKeep_ = 0L;
6333            onChanged();
6334            return this;
6335          }
6336          
6337          // @@protoc_insertion_point(builder_scope:hadoop.hdfs.PurgeLogsRequestProto)
6338        }
6339        
6340        static {
6341          defaultInstance = new PurgeLogsRequestProto(true);
6342          defaultInstance.initFields();
6343        }
6344        
6345        // @@protoc_insertion_point(class_scope:hadoop.hdfs.PurgeLogsRequestProto)
6346      }
6347      
6348      public interface PurgeLogsResponseProtoOrBuilder
6349          extends com.google.protobuf.MessageOrBuilder {
6350      }
6351      public static final class PurgeLogsResponseProto extends
6352          com.google.protobuf.GeneratedMessage
6353          implements PurgeLogsResponseProtoOrBuilder {
6354        // Use PurgeLogsResponseProto.newBuilder() to construct.
6355        private PurgeLogsResponseProto(Builder builder) {
6356          super(builder);
6357        }
6358        private PurgeLogsResponseProto(boolean noInit) {}
6359        
6360        private static final PurgeLogsResponseProto defaultInstance;
6361        public static PurgeLogsResponseProto getDefaultInstance() {
6362          return defaultInstance;
6363        }
6364        
6365        public PurgeLogsResponseProto getDefaultInstanceForType() {
6366          return defaultInstance;
6367        }
6368        
6369        public static final com.google.protobuf.Descriptors.Descriptor
6370            getDescriptor() {
6371          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_PurgeLogsResponseProto_descriptor;
6372        }
6373        
6374        protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
6375            internalGetFieldAccessorTable() {
6376          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_PurgeLogsResponseProto_fieldAccessorTable;
6377        }
6378        
6379        private void initFields() {
6380        }
6381        private byte memoizedIsInitialized = -1;
6382        public final boolean isInitialized() {
6383          byte isInitialized = memoizedIsInitialized;
6384          if (isInitialized != -1) return isInitialized == 1;
6385          
6386          memoizedIsInitialized = 1;
6387          return true;
6388        }
6389        
6390        public void writeTo(com.google.protobuf.CodedOutputStream output)
6391                            throws java.io.IOException {
6392          getSerializedSize();
6393          getUnknownFields().writeTo(output);
6394        }
6395        
6396        private int memoizedSerializedSize = -1;
6397        public int getSerializedSize() {
6398          int size = memoizedSerializedSize;
6399          if (size != -1) return size;
6400        
6401          size = 0;
6402          size += getUnknownFields().getSerializedSize();
6403          memoizedSerializedSize = size;
6404          return size;
6405        }
6406        
6407        private static final long serialVersionUID = 0L;
6408        @java.lang.Override
6409        protected java.lang.Object writeReplace()
6410            throws java.io.ObjectStreamException {
6411          return super.writeReplace();
6412        }
6413        
6414        @java.lang.Override
6415        public boolean equals(final java.lang.Object obj) {
6416          if (obj == this) {
6417           return true;
6418          }
6419          if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto)) {
6420            return super.equals(obj);
6421          }
6422          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto) obj;
6423          
6424          boolean result = true;
6425          result = result &&
6426              getUnknownFields().equals(other.getUnknownFields());
6427          return result;
6428        }
6429        
6430        @java.lang.Override
6431        public int hashCode() {
6432          int hash = 41;
6433          hash = (19 * hash) + getDescriptorForType().hashCode();
6434          hash = (29 * hash) + getUnknownFields().hashCode();
6435          return hash;
6436        }
6437        
6438        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto parseFrom(
6439            com.google.protobuf.ByteString data)
6440            throws com.google.protobuf.InvalidProtocolBufferException {
6441          return newBuilder().mergeFrom(data).buildParsed();
6442        }
6443        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto parseFrom(
6444            com.google.protobuf.ByteString data,
6445            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
6446            throws com.google.protobuf.InvalidProtocolBufferException {
6447          return newBuilder().mergeFrom(data, extensionRegistry)
6448                   .buildParsed();
6449        }
6450        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto parseFrom(byte[] data)
6451            throws com.google.protobuf.InvalidProtocolBufferException {
6452          return newBuilder().mergeFrom(data).buildParsed();
6453        }
6454        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto parseFrom(
6455            byte[] data,
6456            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
6457            throws com.google.protobuf.InvalidProtocolBufferException {
6458          return newBuilder().mergeFrom(data, extensionRegistry)
6459                   .buildParsed();
6460        }
6461        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto parseFrom(java.io.InputStream input)
6462            throws java.io.IOException {
6463          return newBuilder().mergeFrom(input).buildParsed();
6464        }
6465        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto parseFrom(
6466            java.io.InputStream input,
6467            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
6468            throws java.io.IOException {
6469          return newBuilder().mergeFrom(input, extensionRegistry)
6470                   .buildParsed();
6471        }
6472        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto parseDelimitedFrom(java.io.InputStream input)
6473            throws java.io.IOException {
6474          Builder builder = newBuilder();
6475          if (builder.mergeDelimitedFrom(input)) {
6476            return builder.buildParsed();
6477          } else {
6478            return null;
6479          }
6480        }
6481        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto parseDelimitedFrom(
6482            java.io.InputStream input,
6483            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
6484            throws java.io.IOException {
6485          Builder builder = newBuilder();
6486          if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
6487            return builder.buildParsed();
6488          } else {
6489            return null;
6490          }
6491        }
6492        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto parseFrom(
6493            com.google.protobuf.CodedInputStream input)
6494            throws java.io.IOException {
6495          return newBuilder().mergeFrom(input).buildParsed();
6496        }
6497        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto parseFrom(
6498            com.google.protobuf.CodedInputStream input,
6499            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
6500            throws java.io.IOException {
6501          return newBuilder().mergeFrom(input, extensionRegistry)
6502                   .buildParsed();
6503        }
6504        
6505        public static Builder newBuilder() { return Builder.create(); }
6506        public Builder newBuilderForType() { return newBuilder(); }
6507        public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto prototype) {
6508          return newBuilder().mergeFrom(prototype);
6509        }
6510        public Builder toBuilder() { return newBuilder(this); }
6511        
6512        @java.lang.Override
6513        protected Builder newBuilderForType(
6514            com.google.protobuf.GeneratedMessage.BuilderParent parent) {
6515          Builder builder = new Builder(parent);
6516          return builder;
6517        }
6518        public static final class Builder extends
6519            com.google.protobuf.GeneratedMessage.Builder<Builder>
6520           implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProtoOrBuilder {
6521          public static final com.google.protobuf.Descriptors.Descriptor
6522              getDescriptor() {
6523            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_PurgeLogsResponseProto_descriptor;
6524          }
6525          
6526          protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
6527              internalGetFieldAccessorTable() {
6528            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_PurgeLogsResponseProto_fieldAccessorTable;
6529          }
6530          
6531          // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto.newBuilder()
6532          private Builder() {
6533            maybeForceBuilderInitialization();
6534          }
6535          
6536          private Builder(BuilderParent parent) {
6537            super(parent);
6538            maybeForceBuilderInitialization();
6539          }
6540          private void maybeForceBuilderInitialization() {
6541            if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
6542            }
6543          }
6544          private static Builder create() {
6545            return new Builder();
6546          }
6547          
6548          public Builder clear() {
6549            super.clear();
6550            return this;
6551          }
6552          
6553          public Builder clone() {
6554            return create().mergeFrom(buildPartial());
6555          }
6556          
6557          public com.google.protobuf.Descriptors.Descriptor
6558              getDescriptorForType() {
6559            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto.getDescriptor();
6560          }
6561          
6562          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto getDefaultInstanceForType() {
6563            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto.getDefaultInstance();
6564          }
6565          
6566          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto build() {
6567            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto result = buildPartial();
6568            if (!result.isInitialized()) {
6569              throw newUninitializedMessageException(result);
6570            }
6571            return result;
6572          }
6573          
6574          private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto buildParsed()
6575              throws com.google.protobuf.InvalidProtocolBufferException {
6576            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto result = buildPartial();
6577            if (!result.isInitialized()) {
6578              throw newUninitializedMessageException(
6579                result).asInvalidProtocolBufferException();
6580            }
6581            return result;
6582          }
6583          
6584          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto buildPartial() {
6585            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto(this);
6586            onBuilt();
6587            return result;
6588          }
6589          
6590          public Builder mergeFrom(com.google.protobuf.Message other) {
6591            if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto) {
6592              return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto)other);
6593            } else {
6594              super.mergeFrom(other);
6595              return this;
6596            }
6597          }
6598          
6599          public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto other) {
6600            if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto.getDefaultInstance()) return this;
6601            this.mergeUnknownFields(other.getUnknownFields());
6602            return this;
6603          }
6604          
6605          public final boolean isInitialized() {
6606            return true;
6607          }
6608          
6609          public Builder mergeFrom(
6610              com.google.protobuf.CodedInputStream input,
6611              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
6612              throws java.io.IOException {
6613            com.google.protobuf.UnknownFieldSet.Builder unknownFields =
6614              com.google.protobuf.UnknownFieldSet.newBuilder(
6615                this.getUnknownFields());
6616            while (true) {
6617              int tag = input.readTag();
6618              switch (tag) {
6619                case 0:
6620                  this.setUnknownFields(unknownFields.build());
6621                  onChanged();
6622                  return this;
6623                default: {
6624                  if (!parseUnknownField(input, unknownFields,
6625                                         extensionRegistry, tag)) {
6626                    this.setUnknownFields(unknownFields.build());
6627                    onChanged();
6628                    return this;
6629                  }
6630                  break;
6631                }
6632              }
6633            }
6634          }
6635          
6636          
6637          // @@protoc_insertion_point(builder_scope:hadoop.hdfs.PurgeLogsResponseProto)
6638        }
6639        
6640        static {
6641          defaultInstance = new PurgeLogsResponseProto(true);
6642          defaultInstance.initFields();
6643        }
6644        
6645        // @@protoc_insertion_point(class_scope:hadoop.hdfs.PurgeLogsResponseProto)
6646      }
6647      
6648      public interface IsFormattedRequestProtoOrBuilder
6649          extends com.google.protobuf.MessageOrBuilder {
6650        
6651        // required .hadoop.hdfs.JournalIdProto jid = 1;
6652        boolean hasJid();
6653        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid();
6654        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder();
6655      }
6656      public static final class IsFormattedRequestProto extends
6657          com.google.protobuf.GeneratedMessage
6658          implements IsFormattedRequestProtoOrBuilder {
6659        // Use IsFormattedRequestProto.newBuilder() to construct.
6660        private IsFormattedRequestProto(Builder builder) {
6661          super(builder);
6662        }
6663        private IsFormattedRequestProto(boolean noInit) {}
6664        
6665        private static final IsFormattedRequestProto defaultInstance;
6666        public static IsFormattedRequestProto getDefaultInstance() {
6667          return defaultInstance;
6668        }
6669        
6670        public IsFormattedRequestProto getDefaultInstanceForType() {
6671          return defaultInstance;
6672        }
6673        
6674        public static final com.google.protobuf.Descriptors.Descriptor
6675            getDescriptor() {
6676          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_IsFormattedRequestProto_descriptor;
6677        }
6678        
6679        protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
6680            internalGetFieldAccessorTable() {
6681          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_IsFormattedRequestProto_fieldAccessorTable;
6682        }
6683        
6684        private int bitField0_;
6685        // required .hadoop.hdfs.JournalIdProto jid = 1;
6686        public static final int JID_FIELD_NUMBER = 1;
6687        private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto jid_;
6688        public boolean hasJid() {
6689          return ((bitField0_ & 0x00000001) == 0x00000001);
6690        }
6691        public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid() {
6692          return jid_;
6693        }
6694        public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder() {
6695          return jid_;
6696        }
6697        
6698        private void initFields() {
6699          jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
6700        }
6701        private byte memoizedIsInitialized = -1;
6702        public final boolean isInitialized() {
6703          byte isInitialized = memoizedIsInitialized;
6704          if (isInitialized != -1) return isInitialized == 1;
6705          
6706          if (!hasJid()) {
6707            memoizedIsInitialized = 0;
6708            return false;
6709          }
6710          if (!getJid().isInitialized()) {
6711            memoizedIsInitialized = 0;
6712            return false;
6713          }
6714          memoizedIsInitialized = 1;
6715          return true;
6716        }
6717        
6718        public void writeTo(com.google.protobuf.CodedOutputStream output)
6719                            throws java.io.IOException {
6720          getSerializedSize();
6721          if (((bitField0_ & 0x00000001) == 0x00000001)) {
6722            output.writeMessage(1, jid_);
6723          }
6724          getUnknownFields().writeTo(output);
6725        }
6726        
6727        private int memoizedSerializedSize = -1;
6728        public int getSerializedSize() {
6729          int size = memoizedSerializedSize;
6730          if (size != -1) return size;
6731        
6732          size = 0;
6733          if (((bitField0_ & 0x00000001) == 0x00000001)) {
6734            size += com.google.protobuf.CodedOutputStream
6735              .computeMessageSize(1, jid_);
6736          }
6737          size += getUnknownFields().getSerializedSize();
6738          memoizedSerializedSize = size;
6739          return size;
6740        }
6741        
6742        private static final long serialVersionUID = 0L;
6743        @java.lang.Override
6744        protected java.lang.Object writeReplace()
6745            throws java.io.ObjectStreamException {
6746          return super.writeReplace();
6747        }
6748        
6749        @java.lang.Override
6750        public boolean equals(final java.lang.Object obj) {
6751          if (obj == this) {
6752           return true;
6753          }
6754          if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto)) {
6755            return super.equals(obj);
6756          }
6757          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto) obj;
6758          
6759          boolean result = true;
6760          result = result && (hasJid() == other.hasJid());
6761          if (hasJid()) {
6762            result = result && getJid()
6763                .equals(other.getJid());
6764          }
6765          result = result &&
6766              getUnknownFields().equals(other.getUnknownFields());
6767          return result;
6768        }
6769        
6770        @java.lang.Override
6771        public int hashCode() {
6772          int hash = 41;
6773          hash = (19 * hash) + getDescriptorForType().hashCode();
6774          if (hasJid()) {
6775            hash = (37 * hash) + JID_FIELD_NUMBER;
6776            hash = (53 * hash) + getJid().hashCode();
6777          }
6778          hash = (29 * hash) + getUnknownFields().hashCode();
6779          return hash;
6780        }
6781        
6782        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto parseFrom(
6783            com.google.protobuf.ByteString data)
6784            throws com.google.protobuf.InvalidProtocolBufferException {
6785          return newBuilder().mergeFrom(data).buildParsed();
6786        }
6787        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto parseFrom(
6788            com.google.protobuf.ByteString data,
6789            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
6790            throws com.google.protobuf.InvalidProtocolBufferException {
6791          return newBuilder().mergeFrom(data, extensionRegistry)
6792                   .buildParsed();
6793        }
6794        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto parseFrom(byte[] data)
6795            throws com.google.protobuf.InvalidProtocolBufferException {
6796          return newBuilder().mergeFrom(data).buildParsed();
6797        }
6798        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto parseFrom(
6799            byte[] data,
6800            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
6801            throws com.google.protobuf.InvalidProtocolBufferException {
6802          return newBuilder().mergeFrom(data, extensionRegistry)
6803                   .buildParsed();
6804        }
6805        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto parseFrom(java.io.InputStream input)
6806            throws java.io.IOException {
6807          return newBuilder().mergeFrom(input).buildParsed();
6808        }
6809        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto parseFrom(
6810            java.io.InputStream input,
6811            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
6812            throws java.io.IOException {
6813          return newBuilder().mergeFrom(input, extensionRegistry)
6814                   .buildParsed();
6815        }
6816        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto parseDelimitedFrom(java.io.InputStream input)
6817            throws java.io.IOException {
6818          Builder builder = newBuilder();
6819          if (builder.mergeDelimitedFrom(input)) {
6820            return builder.buildParsed();
6821          } else {
6822            return null;
6823          }
6824        }
6825        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto parseDelimitedFrom(
6826            java.io.InputStream input,
6827            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
6828            throws java.io.IOException {
6829          Builder builder = newBuilder();
6830          if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
6831            return builder.buildParsed();
6832          } else {
6833            return null;
6834          }
6835        }
6836        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto parseFrom(
6837            com.google.protobuf.CodedInputStream input)
6838            throws java.io.IOException {
6839          return newBuilder().mergeFrom(input).buildParsed();
6840        }
6841        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto parseFrom(
6842            com.google.protobuf.CodedInputStream input,
6843            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
6844            throws java.io.IOException {
6845          return newBuilder().mergeFrom(input, extensionRegistry)
6846                   .buildParsed();
6847        }
6848        
6849        public static Builder newBuilder() { return Builder.create(); }
6850        public Builder newBuilderForType() { return newBuilder(); }
6851        public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto prototype) {
6852          return newBuilder().mergeFrom(prototype);
6853        }
6854        public Builder toBuilder() { return newBuilder(this); }
6855        
6856        @java.lang.Override
6857        protected Builder newBuilderForType(
6858            com.google.protobuf.GeneratedMessage.BuilderParent parent) {
6859          Builder builder = new Builder(parent);
6860          return builder;
6861        }
6862        public static final class Builder extends
6863            com.google.protobuf.GeneratedMessage.Builder<Builder>
6864           implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProtoOrBuilder {
6865          public static final com.google.protobuf.Descriptors.Descriptor
6866              getDescriptor() {
6867            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_IsFormattedRequestProto_descriptor;
6868          }
6869          
6870          protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
6871              internalGetFieldAccessorTable() {
6872            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_IsFormattedRequestProto_fieldAccessorTable;
6873          }
6874          
6875          // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto.newBuilder()
6876          private Builder() {
6877            maybeForceBuilderInitialization();
6878          }
6879          
6880          private Builder(BuilderParent parent) {
6881            super(parent);
6882            maybeForceBuilderInitialization();
6883          }
6884          private void maybeForceBuilderInitialization() {
6885            if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
6886              getJidFieldBuilder();
6887            }
6888          }
6889          private static Builder create() {
6890            return new Builder();
6891          }
6892          
6893          public Builder clear() {
6894            super.clear();
6895            if (jidBuilder_ == null) {
6896              jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
6897            } else {
6898              jidBuilder_.clear();
6899            }
6900            bitField0_ = (bitField0_ & ~0x00000001);
6901            return this;
6902          }
6903          
6904          public Builder clone() {
6905            return create().mergeFrom(buildPartial());
6906          }
6907          
6908          public com.google.protobuf.Descriptors.Descriptor
6909              getDescriptorForType() {
6910            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto.getDescriptor();
6911          }
6912          
6913          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto getDefaultInstanceForType() {
6914            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto.getDefaultInstance();
6915          }
6916          
6917          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto build() {
6918            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto result = buildPartial();
6919            if (!result.isInitialized()) {
6920              throw newUninitializedMessageException(result);
6921            }
6922            return result;
6923          }
6924          
6925          private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto buildParsed()
6926              throws com.google.protobuf.InvalidProtocolBufferException {
6927            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto result = buildPartial();
6928            if (!result.isInitialized()) {
6929              throw newUninitializedMessageException(
6930                result).asInvalidProtocolBufferException();
6931            }
6932            return result;
6933          }
6934          
6935          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto buildPartial() {
6936            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto(this);
6937            int from_bitField0_ = bitField0_;
6938            int to_bitField0_ = 0;
6939            if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
6940              to_bitField0_ |= 0x00000001;
6941            }
6942            if (jidBuilder_ == null) {
6943              result.jid_ = jid_;
6944            } else {
6945              result.jid_ = jidBuilder_.build();
6946            }
6947            result.bitField0_ = to_bitField0_;
6948            onBuilt();
6949            return result;
6950          }
6951          
6952          public Builder mergeFrom(com.google.protobuf.Message other) {
6953            if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto) {
6954              return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto)other);
6955            } else {
6956              super.mergeFrom(other);
6957              return this;
6958            }
6959          }
6960          
6961          public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto other) {
6962            if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto.getDefaultInstance()) return this;
6963            if (other.hasJid()) {
6964              mergeJid(other.getJid());
6965            }
6966            this.mergeUnknownFields(other.getUnknownFields());
6967            return this;
6968          }
6969          
6970          public final boolean isInitialized() {
6971            if (!hasJid()) {
6972              
6973              return false;
6974            }
6975            if (!getJid().isInitialized()) {
6976              
6977              return false;
6978            }
6979            return true;
6980          }
6981          
6982          public Builder mergeFrom(
6983              com.google.protobuf.CodedInputStream input,
6984              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
6985              throws java.io.IOException {
6986            com.google.protobuf.UnknownFieldSet.Builder unknownFields =
6987              com.google.protobuf.UnknownFieldSet.newBuilder(
6988                this.getUnknownFields());
6989            while (true) {
6990              int tag = input.readTag();
6991              switch (tag) {
6992                case 0:
6993                  this.setUnknownFields(unknownFields.build());
6994                  onChanged();
6995                  return this;
6996                default: {
6997                  if (!parseUnknownField(input, unknownFields,
6998                                         extensionRegistry, tag)) {
6999                    this.setUnknownFields(unknownFields.build());
7000                    onChanged();
7001                    return this;
7002                  }
7003                  break;
7004                }
7005                case 10: {
7006                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder subBuilder = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.newBuilder();
7007                  if (hasJid()) {
7008                    subBuilder.mergeFrom(getJid());
7009                  }
7010                  input.readMessage(subBuilder, extensionRegistry);
7011                  setJid(subBuilder.buildPartial());
7012                  break;
7013                }
7014              }
7015            }
7016          }
7017          
7018          private int bitField0_;
7019          
7020          // required .hadoop.hdfs.JournalIdProto jid = 1;
7021          private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
7022          private com.google.protobuf.SingleFieldBuilder<
7023              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder> jidBuilder_;
7024          public boolean hasJid() {
7025            return ((bitField0_ & 0x00000001) == 0x00000001);
7026          }
7027          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid() {
7028            if (jidBuilder_ == null) {
7029              return jid_;
7030            } else {
7031              return jidBuilder_.getMessage();
7032            }
7033          }
7034          public Builder setJid(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto value) {
7035            if (jidBuilder_ == null) {
7036              if (value == null) {
7037                throw new NullPointerException();
7038              }
7039              jid_ = value;
7040              onChanged();
7041            } else {
7042              jidBuilder_.setMessage(value);
7043            }
7044            bitField0_ |= 0x00000001;
7045            return this;
7046          }
7047          public Builder setJid(
7048              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder builderForValue) {
7049            if (jidBuilder_ == null) {
7050              jid_ = builderForValue.build();
7051              onChanged();
7052            } else {
7053              jidBuilder_.setMessage(builderForValue.build());
7054            }
7055            bitField0_ |= 0x00000001;
7056            return this;
7057          }
7058          public Builder mergeJid(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto value) {
7059            if (jidBuilder_ == null) {
7060              if (((bitField0_ & 0x00000001) == 0x00000001) &&
7061                  jid_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance()) {
7062                jid_ =
7063                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.newBuilder(jid_).mergeFrom(value).buildPartial();
7064              } else {
7065                jid_ = value;
7066              }
7067              onChanged();
7068            } else {
7069              jidBuilder_.mergeFrom(value);
7070            }
7071            bitField0_ |= 0x00000001;
7072            return this;
7073          }
7074          public Builder clearJid() {
7075            if (jidBuilder_ == null) {
7076              jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
7077              onChanged();
7078            } else {
7079              jidBuilder_.clear();
7080            }
7081            bitField0_ = (bitField0_ & ~0x00000001);
7082            return this;
7083          }
7084          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder getJidBuilder() {
7085            bitField0_ |= 0x00000001;
7086            onChanged();
7087            return getJidFieldBuilder().getBuilder();
7088          }
7089          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder() {
7090            if (jidBuilder_ != null) {
7091              return jidBuilder_.getMessageOrBuilder();
7092            } else {
7093              return jid_;
7094            }
7095          }
7096          private com.google.protobuf.SingleFieldBuilder<
7097              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder> 
7098              getJidFieldBuilder() {
7099            if (jidBuilder_ == null) {
7100              jidBuilder_ = new com.google.protobuf.SingleFieldBuilder<
7101                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder>(
7102                      jid_,
7103                      getParentForChildren(),
7104                      isClean());
7105              jid_ = null;
7106            }
7107            return jidBuilder_;
7108          }
7109          
7110          // @@protoc_insertion_point(builder_scope:hadoop.hdfs.IsFormattedRequestProto)
7111        }
7112        
7113        static {
7114          defaultInstance = new IsFormattedRequestProto(true);
7115          defaultInstance.initFields();
7116        }
7117        
7118        // @@protoc_insertion_point(class_scope:hadoop.hdfs.IsFormattedRequestProto)
7119      }
7120      
7121      public interface IsFormattedResponseProtoOrBuilder
7122          extends com.google.protobuf.MessageOrBuilder {
7123        
7124        // required bool isFormatted = 1;
7125        boolean hasIsFormatted();
7126        boolean getIsFormatted();
7127      }
7128      public static final class IsFormattedResponseProto extends
7129          com.google.protobuf.GeneratedMessage
7130          implements IsFormattedResponseProtoOrBuilder {
7131        // Use IsFormattedResponseProto.newBuilder() to construct.
7132        private IsFormattedResponseProto(Builder builder) {
7133          super(builder);
7134        }
7135        private IsFormattedResponseProto(boolean noInit) {}
7136        
7137        private static final IsFormattedResponseProto defaultInstance;
7138        public static IsFormattedResponseProto getDefaultInstance() {
7139          return defaultInstance;
7140        }
7141        
7142        public IsFormattedResponseProto getDefaultInstanceForType() {
7143          return defaultInstance;
7144        }
7145        
7146        public static final com.google.protobuf.Descriptors.Descriptor
7147            getDescriptor() {
7148          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_IsFormattedResponseProto_descriptor;
7149        }
7150        
7151        protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
7152            internalGetFieldAccessorTable() {
7153          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_IsFormattedResponseProto_fieldAccessorTable;
7154        }
7155        
7156        private int bitField0_;
7157        // required bool isFormatted = 1;
7158        public static final int ISFORMATTED_FIELD_NUMBER = 1;
7159        private boolean isFormatted_;
7160        public boolean hasIsFormatted() {
7161          return ((bitField0_ & 0x00000001) == 0x00000001);
7162        }
7163        public boolean getIsFormatted() {
7164          return isFormatted_;
7165        }
7166        
7167        private void initFields() {
7168          isFormatted_ = false;
7169        }
7170        private byte memoizedIsInitialized = -1;
7171        public final boolean isInitialized() {
7172          byte isInitialized = memoizedIsInitialized;
7173          if (isInitialized != -1) return isInitialized == 1;
7174          
7175          if (!hasIsFormatted()) {
7176            memoizedIsInitialized = 0;
7177            return false;
7178          }
7179          memoizedIsInitialized = 1;
7180          return true;
7181        }
7182        
7183        public void writeTo(com.google.protobuf.CodedOutputStream output)
7184                            throws java.io.IOException {
7185          getSerializedSize();
7186          if (((bitField0_ & 0x00000001) == 0x00000001)) {
7187            output.writeBool(1, isFormatted_);
7188          }
7189          getUnknownFields().writeTo(output);
7190        }
7191        
7192        private int memoizedSerializedSize = -1;
7193        public int getSerializedSize() {
7194          int size = memoizedSerializedSize;
7195          if (size != -1) return size;
7196        
7197          size = 0;
7198          if (((bitField0_ & 0x00000001) == 0x00000001)) {
7199            size += com.google.protobuf.CodedOutputStream
7200              .computeBoolSize(1, isFormatted_);
7201          }
7202          size += getUnknownFields().getSerializedSize();
7203          memoizedSerializedSize = size;
7204          return size;
7205        }
7206        
7207        private static final long serialVersionUID = 0L;
7208        @java.lang.Override
7209        protected java.lang.Object writeReplace()
7210            throws java.io.ObjectStreamException {
7211          return super.writeReplace();
7212        }
7213        
7214        @java.lang.Override
7215        public boolean equals(final java.lang.Object obj) {
7216          if (obj == this) {
7217           return true;
7218          }
7219          if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto)) {
7220            return super.equals(obj);
7221          }
7222          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto) obj;
7223          
7224          boolean result = true;
7225          result = result && (hasIsFormatted() == other.hasIsFormatted());
7226          if (hasIsFormatted()) {
7227            result = result && (getIsFormatted()
7228                == other.getIsFormatted());
7229          }
7230          result = result &&
7231              getUnknownFields().equals(other.getUnknownFields());
7232          return result;
7233        }
7234        
7235        @java.lang.Override
7236        public int hashCode() {
7237          int hash = 41;
7238          hash = (19 * hash) + getDescriptorForType().hashCode();
7239          if (hasIsFormatted()) {
7240            hash = (37 * hash) + ISFORMATTED_FIELD_NUMBER;
7241            hash = (53 * hash) + hashBoolean(getIsFormatted());
7242          }
7243          hash = (29 * hash) + getUnknownFields().hashCode();
7244          return hash;
7245        }
7246        
7247        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto parseFrom(
7248            com.google.protobuf.ByteString data)
7249            throws com.google.protobuf.InvalidProtocolBufferException {
7250          return newBuilder().mergeFrom(data).buildParsed();
7251        }
7252        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto parseFrom(
7253            com.google.protobuf.ByteString data,
7254            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
7255            throws com.google.protobuf.InvalidProtocolBufferException {
7256          return newBuilder().mergeFrom(data, extensionRegistry)
7257                   .buildParsed();
7258        }
7259        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto parseFrom(byte[] data)
7260            throws com.google.protobuf.InvalidProtocolBufferException {
7261          return newBuilder().mergeFrom(data).buildParsed();
7262        }
7263        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto parseFrom(
7264            byte[] data,
7265            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
7266            throws com.google.protobuf.InvalidProtocolBufferException {
7267          return newBuilder().mergeFrom(data, extensionRegistry)
7268                   .buildParsed();
7269        }
7270        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto parseFrom(java.io.InputStream input)
7271            throws java.io.IOException {
7272          return newBuilder().mergeFrom(input).buildParsed();
7273        }
7274        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto parseFrom(
7275            java.io.InputStream input,
7276            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
7277            throws java.io.IOException {
7278          return newBuilder().mergeFrom(input, extensionRegistry)
7279                   .buildParsed();
7280        }
7281        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto parseDelimitedFrom(java.io.InputStream input)
7282            throws java.io.IOException {
7283          Builder builder = newBuilder();
7284          if (builder.mergeDelimitedFrom(input)) {
7285            return builder.buildParsed();
7286          } else {
7287            return null;
7288          }
7289        }
7290        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto parseDelimitedFrom(
7291            java.io.InputStream input,
7292            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
7293            throws java.io.IOException {
7294          Builder builder = newBuilder();
7295          if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
7296            return builder.buildParsed();
7297          } else {
7298            return null;
7299          }
7300        }
7301        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto parseFrom(
7302            com.google.protobuf.CodedInputStream input)
7303            throws java.io.IOException {
7304          return newBuilder().mergeFrom(input).buildParsed();
7305        }
7306        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto parseFrom(
7307            com.google.protobuf.CodedInputStream input,
7308            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
7309            throws java.io.IOException {
7310          return newBuilder().mergeFrom(input, extensionRegistry)
7311                   .buildParsed();
7312        }
7313        
7314        public static Builder newBuilder() { return Builder.create(); }
7315        public Builder newBuilderForType() { return newBuilder(); }
7316        public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto prototype) {
7317          return newBuilder().mergeFrom(prototype);
7318        }
7319        public Builder toBuilder() { return newBuilder(this); }
7320        
7321        @java.lang.Override
7322        protected Builder newBuilderForType(
7323            com.google.protobuf.GeneratedMessage.BuilderParent parent) {
7324          Builder builder = new Builder(parent);
7325          return builder;
7326        }
7327        public static final class Builder extends
7328            com.google.protobuf.GeneratedMessage.Builder<Builder>
7329           implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProtoOrBuilder {
7330          public static final com.google.protobuf.Descriptors.Descriptor
7331              getDescriptor() {
7332            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_IsFormattedResponseProto_descriptor;
7333          }
7334          
7335          protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
7336              internalGetFieldAccessorTable() {
7337            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_IsFormattedResponseProto_fieldAccessorTable;
7338          }
7339          
7340          // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto.newBuilder()
7341          private Builder() {
7342            maybeForceBuilderInitialization();
7343          }
7344          
7345          private Builder(BuilderParent parent) {
7346            super(parent);
7347            maybeForceBuilderInitialization();
7348          }
7349          private void maybeForceBuilderInitialization() {
7350            if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
7351            }
7352          }
7353          private static Builder create() {
7354            return new Builder();
7355          }
7356          
7357          public Builder clear() {
7358            super.clear();
7359            isFormatted_ = false;
7360            bitField0_ = (bitField0_ & ~0x00000001);
7361            return this;
7362          }
7363          
7364          public Builder clone() {
7365            return create().mergeFrom(buildPartial());
7366          }
7367          
7368          public com.google.protobuf.Descriptors.Descriptor
7369              getDescriptorForType() {
7370            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto.getDescriptor();
7371          }
7372          
7373          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto getDefaultInstanceForType() {
7374            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto.getDefaultInstance();
7375          }
7376          
7377          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto build() {
7378            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto result = buildPartial();
7379            if (!result.isInitialized()) {
7380              throw newUninitializedMessageException(result);
7381            }
7382            return result;
7383          }
7384          
7385          private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto buildParsed()
7386              throws com.google.protobuf.InvalidProtocolBufferException {
7387            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto result = buildPartial();
7388            if (!result.isInitialized()) {
7389              throw newUninitializedMessageException(
7390                result).asInvalidProtocolBufferException();
7391            }
7392            return result;
7393          }
7394          
7395          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto buildPartial() {
7396            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto(this);
7397            int from_bitField0_ = bitField0_;
7398            int to_bitField0_ = 0;
7399            if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
7400              to_bitField0_ |= 0x00000001;
7401            }
7402            result.isFormatted_ = isFormatted_;
7403            result.bitField0_ = to_bitField0_;
7404            onBuilt();
7405            return result;
7406          }
7407          
7408          public Builder mergeFrom(com.google.protobuf.Message other) {
7409            if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto) {
7410              return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto)other);
7411            } else {
7412              super.mergeFrom(other);
7413              return this;
7414            }
7415          }
7416          
7417          public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto other) {
7418            if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto.getDefaultInstance()) return this;
7419            if (other.hasIsFormatted()) {
7420              setIsFormatted(other.getIsFormatted());
7421            }
7422            this.mergeUnknownFields(other.getUnknownFields());
7423            return this;
7424          }
7425          
7426          public final boolean isInitialized() {
7427            if (!hasIsFormatted()) {
7428              
7429              return false;
7430            }
7431            return true;
7432          }
7433          
7434          public Builder mergeFrom(
7435              com.google.protobuf.CodedInputStream input,
7436              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
7437              throws java.io.IOException {
7438            com.google.protobuf.UnknownFieldSet.Builder unknownFields =
7439              com.google.protobuf.UnknownFieldSet.newBuilder(
7440                this.getUnknownFields());
7441            while (true) {
7442              int tag = input.readTag();
7443              switch (tag) {
7444                case 0:
7445                  this.setUnknownFields(unknownFields.build());
7446                  onChanged();
7447                  return this;
7448                default: {
7449                  if (!parseUnknownField(input, unknownFields,
7450                                         extensionRegistry, tag)) {
7451                    this.setUnknownFields(unknownFields.build());
7452                    onChanged();
7453                    return this;
7454                  }
7455                  break;
7456                }
7457                case 8: {
7458                  bitField0_ |= 0x00000001;
7459                  isFormatted_ = input.readBool();
7460                  break;
7461                }
7462              }
7463            }
7464          }
7465          
7466          private int bitField0_;
7467          
7468          // required bool isFormatted = 1;
7469          private boolean isFormatted_ ;
7470          public boolean hasIsFormatted() {
7471            return ((bitField0_ & 0x00000001) == 0x00000001);
7472          }
7473          public boolean getIsFormatted() {
7474            return isFormatted_;
7475          }
7476          public Builder setIsFormatted(boolean value) {
7477            bitField0_ |= 0x00000001;
7478            isFormatted_ = value;
7479            onChanged();
7480            return this;
7481          }
7482          public Builder clearIsFormatted() {
7483            bitField0_ = (bitField0_ & ~0x00000001);
7484            isFormatted_ = false;
7485            onChanged();
7486            return this;
7487          }
7488          
7489          // @@protoc_insertion_point(builder_scope:hadoop.hdfs.IsFormattedResponseProto)
7490        }
7491        
7492        static {
7493          defaultInstance = new IsFormattedResponseProto(true);
7494          defaultInstance.initFields();
7495        }
7496        
7497        // @@protoc_insertion_point(class_scope:hadoop.hdfs.IsFormattedResponseProto)
7498      }
7499      
7500      public interface GetJournalStateRequestProtoOrBuilder
7501          extends com.google.protobuf.MessageOrBuilder {
7502        
7503        // required .hadoop.hdfs.JournalIdProto jid = 1;
7504        boolean hasJid();
7505        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid();
7506        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder();
7507      }
7508      public static final class GetJournalStateRequestProto extends
7509          com.google.protobuf.GeneratedMessage
7510          implements GetJournalStateRequestProtoOrBuilder {
7511        // Use GetJournalStateRequestProto.newBuilder() to construct.
7512        private GetJournalStateRequestProto(Builder builder) {
7513          super(builder);
7514        }
7515        private GetJournalStateRequestProto(boolean noInit) {}
7516        
7517        private static final GetJournalStateRequestProto defaultInstance;
7518        public static GetJournalStateRequestProto getDefaultInstance() {
7519          return defaultInstance;
7520        }
7521        
7522        public GetJournalStateRequestProto getDefaultInstanceForType() {
7523          return defaultInstance;
7524        }
7525        
7526        public static final com.google.protobuf.Descriptors.Descriptor
7527            getDescriptor() {
7528          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_GetJournalStateRequestProto_descriptor;
7529        }
7530        
7531        protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
7532            internalGetFieldAccessorTable() {
7533          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_GetJournalStateRequestProto_fieldAccessorTable;
7534        }
7535        
7536        private int bitField0_;
7537        // required .hadoop.hdfs.JournalIdProto jid = 1;
7538        public static final int JID_FIELD_NUMBER = 1;
7539        private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto jid_;
7540        public boolean hasJid() {
7541          return ((bitField0_ & 0x00000001) == 0x00000001);
7542        }
7543        public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid() {
7544          return jid_;
7545        }
7546        public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder() {
7547          return jid_;
7548        }
7549        
7550        private void initFields() {
7551          jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
7552        }
7553        private byte memoizedIsInitialized = -1;
7554        public final boolean isInitialized() {
7555          byte isInitialized = memoizedIsInitialized;
7556          if (isInitialized != -1) return isInitialized == 1;
7557          
7558          if (!hasJid()) {
7559            memoizedIsInitialized = 0;
7560            return false;
7561          }
7562          if (!getJid().isInitialized()) {
7563            memoizedIsInitialized = 0;
7564            return false;
7565          }
7566          memoizedIsInitialized = 1;
7567          return true;
7568        }
7569        
7570        public void writeTo(com.google.protobuf.CodedOutputStream output)
7571                            throws java.io.IOException {
7572          getSerializedSize();
7573          if (((bitField0_ & 0x00000001) == 0x00000001)) {
7574            output.writeMessage(1, jid_);
7575          }
7576          getUnknownFields().writeTo(output);
7577        }
7578        
7579        private int memoizedSerializedSize = -1;
7580        public int getSerializedSize() {
7581          int size = memoizedSerializedSize;
7582          if (size != -1) return size;
7583        
7584          size = 0;
7585          if (((bitField0_ & 0x00000001) == 0x00000001)) {
7586            size += com.google.protobuf.CodedOutputStream
7587              .computeMessageSize(1, jid_);
7588          }
7589          size += getUnknownFields().getSerializedSize();
7590          memoizedSerializedSize = size;
7591          return size;
7592        }
7593        
7594        private static final long serialVersionUID = 0L;
7595        @java.lang.Override
7596        protected java.lang.Object writeReplace()
7597            throws java.io.ObjectStreamException {
7598          return super.writeReplace();
7599        }
7600        
7601        @java.lang.Override
7602        public boolean equals(final java.lang.Object obj) {
7603          if (obj == this) {
7604           return true;
7605          }
7606          if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto)) {
7607            return super.equals(obj);
7608          }
7609          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto) obj;
7610          
7611          boolean result = true;
7612          result = result && (hasJid() == other.hasJid());
7613          if (hasJid()) {
7614            result = result && getJid()
7615                .equals(other.getJid());
7616          }
7617          result = result &&
7618              getUnknownFields().equals(other.getUnknownFields());
7619          return result;
7620        }
7621        
7622        @java.lang.Override
7623        public int hashCode() {
7624          int hash = 41;
7625          hash = (19 * hash) + getDescriptorForType().hashCode();
7626          if (hasJid()) {
7627            hash = (37 * hash) + JID_FIELD_NUMBER;
7628            hash = (53 * hash) + getJid().hashCode();
7629          }
7630          hash = (29 * hash) + getUnknownFields().hashCode();
7631          return hash;
7632        }
7633        
7634        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto parseFrom(
7635            com.google.protobuf.ByteString data)
7636            throws com.google.protobuf.InvalidProtocolBufferException {
7637          return newBuilder().mergeFrom(data).buildParsed();
7638        }
7639        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto parseFrom(
7640            com.google.protobuf.ByteString data,
7641            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
7642            throws com.google.protobuf.InvalidProtocolBufferException {
7643          return newBuilder().mergeFrom(data, extensionRegistry)
7644                   .buildParsed();
7645        }
7646        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto parseFrom(byte[] data)
7647            throws com.google.protobuf.InvalidProtocolBufferException {
7648          return newBuilder().mergeFrom(data).buildParsed();
7649        }
7650        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto parseFrom(
7651            byte[] data,
7652            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
7653            throws com.google.protobuf.InvalidProtocolBufferException {
7654          return newBuilder().mergeFrom(data, extensionRegistry)
7655                   .buildParsed();
7656        }
7657        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto parseFrom(java.io.InputStream input)
7658            throws java.io.IOException {
7659          return newBuilder().mergeFrom(input).buildParsed();
7660        }
7661        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto parseFrom(
7662            java.io.InputStream input,
7663            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
7664            throws java.io.IOException {
7665          return newBuilder().mergeFrom(input, extensionRegistry)
7666                   .buildParsed();
7667        }
7668        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto parseDelimitedFrom(java.io.InputStream input)
7669            throws java.io.IOException {
7670          Builder builder = newBuilder();
7671          if (builder.mergeDelimitedFrom(input)) {
7672            return builder.buildParsed();
7673          } else {
7674            return null;
7675          }
7676        }
7677        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto parseDelimitedFrom(
7678            java.io.InputStream input,
7679            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
7680            throws java.io.IOException {
7681          Builder builder = newBuilder();
7682          if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
7683            return builder.buildParsed();
7684          } else {
7685            return null;
7686          }
7687        }
7688        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto parseFrom(
7689            com.google.protobuf.CodedInputStream input)
7690            throws java.io.IOException {
7691          return newBuilder().mergeFrom(input).buildParsed();
7692        }
7693        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto parseFrom(
7694            com.google.protobuf.CodedInputStream input,
7695            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
7696            throws java.io.IOException {
7697          return newBuilder().mergeFrom(input, extensionRegistry)
7698                   .buildParsed();
7699        }
7700        
7701        public static Builder newBuilder() { return Builder.create(); }
7702        public Builder newBuilderForType() { return newBuilder(); }
7703        public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto prototype) {
7704          return newBuilder().mergeFrom(prototype);
7705        }
7706        public Builder toBuilder() { return newBuilder(this); }
7707        
7708        @java.lang.Override
7709        protected Builder newBuilderForType(
7710            com.google.protobuf.GeneratedMessage.BuilderParent parent) {
7711          Builder builder = new Builder(parent);
7712          return builder;
7713        }
7714        public static final class Builder extends
7715            com.google.protobuf.GeneratedMessage.Builder<Builder>
7716           implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProtoOrBuilder {
7717          public static final com.google.protobuf.Descriptors.Descriptor
7718              getDescriptor() {
7719            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_GetJournalStateRequestProto_descriptor;
7720          }
7721          
7722          protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
7723              internalGetFieldAccessorTable() {
7724            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_GetJournalStateRequestProto_fieldAccessorTable;
7725          }
7726          
7727          // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto.newBuilder()
7728          private Builder() {
7729            maybeForceBuilderInitialization();
7730          }
7731          
7732          private Builder(BuilderParent parent) {
7733            super(parent);
7734            maybeForceBuilderInitialization();
7735          }
7736          private void maybeForceBuilderInitialization() {
7737            if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
7738              getJidFieldBuilder();
7739            }
7740          }
7741          private static Builder create() {
7742            return new Builder();
7743          }
7744          
7745          public Builder clear() {
7746            super.clear();
7747            if (jidBuilder_ == null) {
7748              jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
7749            } else {
7750              jidBuilder_.clear();
7751            }
7752            bitField0_ = (bitField0_ & ~0x00000001);
7753            return this;
7754          }
7755          
7756          public Builder clone() {
7757            return create().mergeFrom(buildPartial());
7758          }
7759          
7760          public com.google.protobuf.Descriptors.Descriptor
7761              getDescriptorForType() {
7762            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto.getDescriptor();
7763          }
7764          
7765          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto getDefaultInstanceForType() {
7766            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto.getDefaultInstance();
7767          }
7768          
7769          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto build() {
7770            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto result = buildPartial();
7771            if (!result.isInitialized()) {
7772              throw newUninitializedMessageException(result);
7773            }
7774            return result;
7775          }
7776          
7777          private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto buildParsed()
7778              throws com.google.protobuf.InvalidProtocolBufferException {
7779            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto result = buildPartial();
7780            if (!result.isInitialized()) {
7781              throw newUninitializedMessageException(
7782                result).asInvalidProtocolBufferException();
7783            }
7784            return result;
7785          }
7786          
7787          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto buildPartial() {
7788            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto(this);
7789            int from_bitField0_ = bitField0_;
7790            int to_bitField0_ = 0;
7791            if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
7792              to_bitField0_ |= 0x00000001;
7793            }
7794            if (jidBuilder_ == null) {
7795              result.jid_ = jid_;
7796            } else {
7797              result.jid_ = jidBuilder_.build();
7798            }
7799            result.bitField0_ = to_bitField0_;
7800            onBuilt();
7801            return result;
7802          }
7803          
7804          public Builder mergeFrom(com.google.protobuf.Message other) {
7805            if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto) {
7806              return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto)other);
7807            } else {
7808              super.mergeFrom(other);
7809              return this;
7810            }
7811          }
7812          
7813          public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto other) {
7814            if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto.getDefaultInstance()) return this;
7815            if (other.hasJid()) {
7816              mergeJid(other.getJid());
7817            }
7818            this.mergeUnknownFields(other.getUnknownFields());
7819            return this;
7820          }
7821          
7822          public final boolean isInitialized() {
7823            if (!hasJid()) {
7824              
7825              return false;
7826            }
7827            if (!getJid().isInitialized()) {
7828              
7829              return false;
7830            }
7831            return true;
7832          }
7833          
7834          public Builder mergeFrom(
7835              com.google.protobuf.CodedInputStream input,
7836              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
7837              throws java.io.IOException {
7838            com.google.protobuf.UnknownFieldSet.Builder unknownFields =
7839              com.google.protobuf.UnknownFieldSet.newBuilder(
7840                this.getUnknownFields());
7841            while (true) {
7842              int tag = input.readTag();
7843              switch (tag) {
7844                case 0:
7845                  this.setUnknownFields(unknownFields.build());
7846                  onChanged();
7847                  return this;
7848                default: {
7849                  if (!parseUnknownField(input, unknownFields,
7850                                         extensionRegistry, tag)) {
7851                    this.setUnknownFields(unknownFields.build());
7852                    onChanged();
7853                    return this;
7854                  }
7855                  break;
7856                }
7857                case 10: {
7858                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder subBuilder = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.newBuilder();
7859                  if (hasJid()) {
7860                    subBuilder.mergeFrom(getJid());
7861                  }
7862                  input.readMessage(subBuilder, extensionRegistry);
7863                  setJid(subBuilder.buildPartial());
7864                  break;
7865                }
7866              }
7867            }
7868          }
7869          
7870          private int bitField0_;
7871          
7872          // required .hadoop.hdfs.JournalIdProto jid = 1;
7873          private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
7874          private com.google.protobuf.SingleFieldBuilder<
7875              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder> jidBuilder_;
7876          public boolean hasJid() {
7877            return ((bitField0_ & 0x00000001) == 0x00000001);
7878          }
7879          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid() {
7880            if (jidBuilder_ == null) {
7881              return jid_;
7882            } else {
7883              return jidBuilder_.getMessage();
7884            }
7885          }
7886          public Builder setJid(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto value) {
7887            if (jidBuilder_ == null) {
7888              if (value == null) {
7889                throw new NullPointerException();
7890              }
7891              jid_ = value;
7892              onChanged();
7893            } else {
7894              jidBuilder_.setMessage(value);
7895            }
7896            bitField0_ |= 0x00000001;
7897            return this;
7898          }
7899          public Builder setJid(
7900              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder builderForValue) {
7901            if (jidBuilder_ == null) {
7902              jid_ = builderForValue.build();
7903              onChanged();
7904            } else {
7905              jidBuilder_.setMessage(builderForValue.build());
7906            }
7907            bitField0_ |= 0x00000001;
7908            return this;
7909          }
7910          public Builder mergeJid(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto value) {
7911            if (jidBuilder_ == null) {
7912              if (((bitField0_ & 0x00000001) == 0x00000001) &&
7913                  jid_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance()) {
7914                jid_ =
7915                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.newBuilder(jid_).mergeFrom(value).buildPartial();
7916              } else {
7917                jid_ = value;
7918              }
7919              onChanged();
7920            } else {
7921              jidBuilder_.mergeFrom(value);
7922            }
7923            bitField0_ |= 0x00000001;
7924            return this;
7925          }
7926          public Builder clearJid() {
7927            if (jidBuilder_ == null) {
7928              jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
7929              onChanged();
7930            } else {
7931              jidBuilder_.clear();
7932            }
7933            bitField0_ = (bitField0_ & ~0x00000001);
7934            return this;
7935          }
7936          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder getJidBuilder() {
7937            bitField0_ |= 0x00000001;
7938            onChanged();
7939            return getJidFieldBuilder().getBuilder();
7940          }
7941          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder() {
7942            if (jidBuilder_ != null) {
7943              return jidBuilder_.getMessageOrBuilder();
7944            } else {
7945              return jid_;
7946            }
7947          }
7948          private com.google.protobuf.SingleFieldBuilder<
7949              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder> 
7950              getJidFieldBuilder() {
7951            if (jidBuilder_ == null) {
7952              jidBuilder_ = new com.google.protobuf.SingleFieldBuilder<
7953                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder>(
7954                      jid_,
7955                      getParentForChildren(),
7956                      isClean());
7957              jid_ = null;
7958            }
7959            return jidBuilder_;
7960          }
7961          
7962          // @@protoc_insertion_point(builder_scope:hadoop.hdfs.GetJournalStateRequestProto)
7963        }
7964        
7965        static {
7966          defaultInstance = new GetJournalStateRequestProto(true);
7967          defaultInstance.initFields();
7968        }
7969        
7970        // @@protoc_insertion_point(class_scope:hadoop.hdfs.GetJournalStateRequestProto)
7971      }
7972      
7973      public interface GetJournalStateResponseProtoOrBuilder
7974          extends com.google.protobuf.MessageOrBuilder {
7975        
7976        // required uint64 lastPromisedEpoch = 1;
7977        boolean hasLastPromisedEpoch();
7978        long getLastPromisedEpoch();
7979        
7980        // required uint32 httpPort = 2;
7981        boolean hasHttpPort();
7982        int getHttpPort();
7983      }
7984      public static final class GetJournalStateResponseProto extends
7985          com.google.protobuf.GeneratedMessage
7986          implements GetJournalStateResponseProtoOrBuilder {
7987        // Use GetJournalStateResponseProto.newBuilder() to construct.
7988        private GetJournalStateResponseProto(Builder builder) {
7989          super(builder);
7990        }
7991        private GetJournalStateResponseProto(boolean noInit) {}
7992        
7993        private static final GetJournalStateResponseProto defaultInstance;
7994        public static GetJournalStateResponseProto getDefaultInstance() {
7995          return defaultInstance;
7996        }
7997        
7998        public GetJournalStateResponseProto getDefaultInstanceForType() {
7999          return defaultInstance;
8000        }
8001        
8002        public static final com.google.protobuf.Descriptors.Descriptor
8003            getDescriptor() {
8004          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_GetJournalStateResponseProto_descriptor;
8005        }
8006        
8007        protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
8008            internalGetFieldAccessorTable() {
8009          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_GetJournalStateResponseProto_fieldAccessorTable;
8010        }
8011        
8012        private int bitField0_;
8013        // required uint64 lastPromisedEpoch = 1;
8014        public static final int LASTPROMISEDEPOCH_FIELD_NUMBER = 1;
8015        private long lastPromisedEpoch_;
8016        public boolean hasLastPromisedEpoch() {
8017          return ((bitField0_ & 0x00000001) == 0x00000001);
8018        }
8019        public long getLastPromisedEpoch() {
8020          return lastPromisedEpoch_;
8021        }
8022        
8023        // required uint32 httpPort = 2;
8024        public static final int HTTPPORT_FIELD_NUMBER = 2;
8025        private int httpPort_;
8026        public boolean hasHttpPort() {
8027          return ((bitField0_ & 0x00000002) == 0x00000002);
8028        }
8029        public int getHttpPort() {
8030          return httpPort_;
8031        }
8032        
8033        private void initFields() {
8034          lastPromisedEpoch_ = 0L;
8035          httpPort_ = 0;
8036        }
8037        private byte memoizedIsInitialized = -1;
8038        public final boolean isInitialized() {
8039          byte isInitialized = memoizedIsInitialized;
8040          if (isInitialized != -1) return isInitialized == 1;
8041          
8042          if (!hasLastPromisedEpoch()) {
8043            memoizedIsInitialized = 0;
8044            return false;
8045          }
8046          if (!hasHttpPort()) {
8047            memoizedIsInitialized = 0;
8048            return false;
8049          }
8050          memoizedIsInitialized = 1;
8051          return true;
8052        }
8053        
8054        public void writeTo(com.google.protobuf.CodedOutputStream output)
8055                            throws java.io.IOException {
8056          getSerializedSize();
8057          if (((bitField0_ & 0x00000001) == 0x00000001)) {
8058            output.writeUInt64(1, lastPromisedEpoch_);
8059          }
8060          if (((bitField0_ & 0x00000002) == 0x00000002)) {
8061            output.writeUInt32(2, httpPort_);
8062          }
8063          getUnknownFields().writeTo(output);
8064        }
8065        
8066        private int memoizedSerializedSize = -1;
8067        public int getSerializedSize() {
8068          int size = memoizedSerializedSize;
8069          if (size != -1) return size;
8070        
8071          size = 0;
8072          if (((bitField0_ & 0x00000001) == 0x00000001)) {
8073            size += com.google.protobuf.CodedOutputStream
8074              .computeUInt64Size(1, lastPromisedEpoch_);
8075          }
8076          if (((bitField0_ & 0x00000002) == 0x00000002)) {
8077            size += com.google.protobuf.CodedOutputStream
8078              .computeUInt32Size(2, httpPort_);
8079          }
8080          size += getUnknownFields().getSerializedSize();
8081          memoizedSerializedSize = size;
8082          return size;
8083        }
8084        
8085        private static final long serialVersionUID = 0L;
8086        @java.lang.Override
8087        protected java.lang.Object writeReplace()
8088            throws java.io.ObjectStreamException {
8089          return super.writeReplace();
8090        }
8091        
8092        @java.lang.Override
8093        public boolean equals(final java.lang.Object obj) {
8094          if (obj == this) {
8095           return true;
8096          }
8097          if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto)) {
8098            return super.equals(obj);
8099          }
8100          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto) obj;
8101          
8102          boolean result = true;
8103          result = result && (hasLastPromisedEpoch() == other.hasLastPromisedEpoch());
8104          if (hasLastPromisedEpoch()) {
8105            result = result && (getLastPromisedEpoch()
8106                == other.getLastPromisedEpoch());
8107          }
8108          result = result && (hasHttpPort() == other.hasHttpPort());
8109          if (hasHttpPort()) {
8110            result = result && (getHttpPort()
8111                == other.getHttpPort());
8112          }
8113          result = result &&
8114              getUnknownFields().equals(other.getUnknownFields());
8115          return result;
8116        }
8117        
8118        @java.lang.Override
8119        public int hashCode() {
8120          int hash = 41;
8121          hash = (19 * hash) + getDescriptorForType().hashCode();
8122          if (hasLastPromisedEpoch()) {
8123            hash = (37 * hash) + LASTPROMISEDEPOCH_FIELD_NUMBER;
8124            hash = (53 * hash) + hashLong(getLastPromisedEpoch());
8125          }
8126          if (hasHttpPort()) {
8127            hash = (37 * hash) + HTTPPORT_FIELD_NUMBER;
8128            hash = (53 * hash) + getHttpPort();
8129          }
8130          hash = (29 * hash) + getUnknownFields().hashCode();
8131          return hash;
8132        }
8133        
8134        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto parseFrom(
8135            com.google.protobuf.ByteString data)
8136            throws com.google.protobuf.InvalidProtocolBufferException {
8137          return newBuilder().mergeFrom(data).buildParsed();
8138        }
8139        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto parseFrom(
8140            com.google.protobuf.ByteString data,
8141            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
8142            throws com.google.protobuf.InvalidProtocolBufferException {
8143          return newBuilder().mergeFrom(data, extensionRegistry)
8144                   .buildParsed();
8145        }
8146        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto parseFrom(byte[] data)
8147            throws com.google.protobuf.InvalidProtocolBufferException {
8148          return newBuilder().mergeFrom(data).buildParsed();
8149        }
8150        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto parseFrom(
8151            byte[] data,
8152            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
8153            throws com.google.protobuf.InvalidProtocolBufferException {
8154          return newBuilder().mergeFrom(data, extensionRegistry)
8155                   .buildParsed();
8156        }
8157        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto parseFrom(java.io.InputStream input)
8158            throws java.io.IOException {
8159          return newBuilder().mergeFrom(input).buildParsed();
8160        }
8161        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto parseFrom(
8162            java.io.InputStream input,
8163            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
8164            throws java.io.IOException {
8165          return newBuilder().mergeFrom(input, extensionRegistry)
8166                   .buildParsed();
8167        }
8168        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto parseDelimitedFrom(java.io.InputStream input)
8169            throws java.io.IOException {
8170          Builder builder = newBuilder();
8171          if (builder.mergeDelimitedFrom(input)) {
8172            return builder.buildParsed();
8173          } else {
8174            return null;
8175          }
8176        }
8177        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto parseDelimitedFrom(
8178            java.io.InputStream input,
8179            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
8180            throws java.io.IOException {
8181          Builder builder = newBuilder();
8182          if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
8183            return builder.buildParsed();
8184          } else {
8185            return null;
8186          }
8187        }
8188        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto parseFrom(
8189            com.google.protobuf.CodedInputStream input)
8190            throws java.io.IOException {
8191          return newBuilder().mergeFrom(input).buildParsed();
8192        }
8193        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto parseFrom(
8194            com.google.protobuf.CodedInputStream input,
8195            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
8196            throws java.io.IOException {
8197          return newBuilder().mergeFrom(input, extensionRegistry)
8198                   .buildParsed();
8199        }
8200        
8201        public static Builder newBuilder() { return Builder.create(); }
8202        public Builder newBuilderForType() { return newBuilder(); }
8203        public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto prototype) {
8204          return newBuilder().mergeFrom(prototype);
8205        }
8206        public Builder toBuilder() { return newBuilder(this); }
8207        
8208        @java.lang.Override
8209        protected Builder newBuilderForType(
8210            com.google.protobuf.GeneratedMessage.BuilderParent parent) {
8211          Builder builder = new Builder(parent);
8212          return builder;
8213        }
8214        public static final class Builder extends
8215            com.google.protobuf.GeneratedMessage.Builder<Builder>
8216           implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProtoOrBuilder {
8217          public static final com.google.protobuf.Descriptors.Descriptor
8218              getDescriptor() {
8219            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_GetJournalStateResponseProto_descriptor;
8220          }
8221          
8222          protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
8223              internalGetFieldAccessorTable() {
8224            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_GetJournalStateResponseProto_fieldAccessorTable;
8225          }
8226          
8227          // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto.newBuilder()
8228          private Builder() {
8229            maybeForceBuilderInitialization();
8230          }
8231          
8232          private Builder(BuilderParent parent) {
8233            super(parent);
8234            maybeForceBuilderInitialization();
8235          }
8236          private void maybeForceBuilderInitialization() {
8237            if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
8238            }
8239          }
8240          private static Builder create() {
8241            return new Builder();
8242          }
8243          
8244          public Builder clear() {
8245            super.clear();
8246            lastPromisedEpoch_ = 0L;
8247            bitField0_ = (bitField0_ & ~0x00000001);
8248            httpPort_ = 0;
8249            bitField0_ = (bitField0_ & ~0x00000002);
8250            return this;
8251          }
8252          
8253          public Builder clone() {
8254            return create().mergeFrom(buildPartial());
8255          }
8256          
8257          public com.google.protobuf.Descriptors.Descriptor
8258              getDescriptorForType() {
8259            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto.getDescriptor();
8260          }
8261          
8262          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto getDefaultInstanceForType() {
8263            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto.getDefaultInstance();
8264          }
8265          
8266          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto build() {
8267            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto result = buildPartial();
8268            if (!result.isInitialized()) {
8269              throw newUninitializedMessageException(result);
8270            }
8271            return result;
8272          }
8273          
8274          private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto buildParsed()
8275              throws com.google.protobuf.InvalidProtocolBufferException {
8276            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto result = buildPartial();
8277            if (!result.isInitialized()) {
8278              throw newUninitializedMessageException(
8279                result).asInvalidProtocolBufferException();
8280            }
8281            return result;
8282          }
8283          
8284          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto buildPartial() {
8285            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto(this);
8286            int from_bitField0_ = bitField0_;
8287            int to_bitField0_ = 0;
8288            if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
8289              to_bitField0_ |= 0x00000001;
8290            }
8291            result.lastPromisedEpoch_ = lastPromisedEpoch_;
8292            if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
8293              to_bitField0_ |= 0x00000002;
8294            }
8295            result.httpPort_ = httpPort_;
8296            result.bitField0_ = to_bitField0_;
8297            onBuilt();
8298            return result;
8299          }
8300          
8301          public Builder mergeFrom(com.google.protobuf.Message other) {
8302            if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto) {
8303              return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto)other);
8304            } else {
8305              super.mergeFrom(other);
8306              return this;
8307            }
8308          }
8309          
8310          public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto other) {
8311            if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto.getDefaultInstance()) return this;
8312            if (other.hasLastPromisedEpoch()) {
8313              setLastPromisedEpoch(other.getLastPromisedEpoch());
8314            }
8315            if (other.hasHttpPort()) {
8316              setHttpPort(other.getHttpPort());
8317            }
8318            this.mergeUnknownFields(other.getUnknownFields());
8319            return this;
8320          }
8321          
8322          public final boolean isInitialized() {
8323            if (!hasLastPromisedEpoch()) {
8324              
8325              return false;
8326            }
8327            if (!hasHttpPort()) {
8328              
8329              return false;
8330            }
8331            return true;
8332          }
8333          
8334          public Builder mergeFrom(
8335              com.google.protobuf.CodedInputStream input,
8336              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
8337              throws java.io.IOException {
8338            com.google.protobuf.UnknownFieldSet.Builder unknownFields =
8339              com.google.protobuf.UnknownFieldSet.newBuilder(
8340                this.getUnknownFields());
8341            while (true) {
8342              int tag = input.readTag();
8343              switch (tag) {
8344                case 0:
8345                  this.setUnknownFields(unknownFields.build());
8346                  onChanged();
8347                  return this;
8348                default: {
8349                  if (!parseUnknownField(input, unknownFields,
8350                                         extensionRegistry, tag)) {
8351                    this.setUnknownFields(unknownFields.build());
8352                    onChanged();
8353                    return this;
8354                  }
8355                  break;
8356                }
8357                case 8: {
8358                  bitField0_ |= 0x00000001;
8359                  lastPromisedEpoch_ = input.readUInt64();
8360                  break;
8361                }
8362                case 16: {
8363                  bitField0_ |= 0x00000002;
8364                  httpPort_ = input.readUInt32();
8365                  break;
8366                }
8367              }
8368            }
8369          }
8370          
8371          private int bitField0_;
8372          
8373          // required uint64 lastPromisedEpoch = 1;
8374          private long lastPromisedEpoch_ ;
8375          public boolean hasLastPromisedEpoch() {
8376            return ((bitField0_ & 0x00000001) == 0x00000001);
8377          }
8378          public long getLastPromisedEpoch() {
8379            return lastPromisedEpoch_;
8380          }
8381          public Builder setLastPromisedEpoch(long value) {
8382            bitField0_ |= 0x00000001;
8383            lastPromisedEpoch_ = value;
8384            onChanged();
8385            return this;
8386          }
8387          public Builder clearLastPromisedEpoch() {
8388            bitField0_ = (bitField0_ & ~0x00000001);
8389            lastPromisedEpoch_ = 0L;
8390            onChanged();
8391            return this;
8392          }
8393          
8394          // required uint32 httpPort = 2;
8395          private int httpPort_ ;
8396          public boolean hasHttpPort() {
8397            return ((bitField0_ & 0x00000002) == 0x00000002);
8398          }
8399          public int getHttpPort() {
8400            return httpPort_;
8401          }
8402          public Builder setHttpPort(int value) {
8403            bitField0_ |= 0x00000002;
8404            httpPort_ = value;
8405            onChanged();
8406            return this;
8407          }
8408          public Builder clearHttpPort() {
8409            bitField0_ = (bitField0_ & ~0x00000002);
8410            httpPort_ = 0;
8411            onChanged();
8412            return this;
8413          }
8414          
8415          // @@protoc_insertion_point(builder_scope:hadoop.hdfs.GetJournalStateResponseProto)
8416        }
8417        
8418        static {
8419          defaultInstance = new GetJournalStateResponseProto(true);
8420          defaultInstance.initFields();
8421        }
8422        
8423        // @@protoc_insertion_point(class_scope:hadoop.hdfs.GetJournalStateResponseProto)
8424      }
8425      
8426      public interface FormatRequestProtoOrBuilder
8427          extends com.google.protobuf.MessageOrBuilder {
8428        
8429        // required .hadoop.hdfs.JournalIdProto jid = 1;
8430        boolean hasJid();
8431        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid();
8432        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder();
8433        
8434        // required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;
8435        boolean hasNsInfo();
8436        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto getNsInfo();
8437        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProtoOrBuilder getNsInfoOrBuilder();
8438      }
8439      public static final class FormatRequestProto extends
8440          com.google.protobuf.GeneratedMessage
8441          implements FormatRequestProtoOrBuilder {
8442        // Use FormatRequestProto.newBuilder() to construct.
8443        private FormatRequestProto(Builder builder) {
8444          super(builder);
8445        }
8446        private FormatRequestProto(boolean noInit) {}
8447        
8448        private static final FormatRequestProto defaultInstance;
8449        public static FormatRequestProto getDefaultInstance() {
8450          return defaultInstance;
8451        }
8452        
8453        public FormatRequestProto getDefaultInstanceForType() {
8454          return defaultInstance;
8455        }
8456        
8457        public static final com.google.protobuf.Descriptors.Descriptor
8458            getDescriptor() {
8459          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_FormatRequestProto_descriptor;
8460        }
8461        
8462        protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
8463            internalGetFieldAccessorTable() {
8464          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_FormatRequestProto_fieldAccessorTable;
8465        }
8466        
8467        private int bitField0_;
8468        // required .hadoop.hdfs.JournalIdProto jid = 1;
8469        public static final int JID_FIELD_NUMBER = 1;
8470        private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto jid_;
8471        public boolean hasJid() {
8472          return ((bitField0_ & 0x00000001) == 0x00000001);
8473        }
8474        public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid() {
8475          return jid_;
8476        }
8477        public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder() {
8478          return jid_;
8479        }
8480        
8481        // required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;
8482        public static final int NSINFO_FIELD_NUMBER = 2;
8483        private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto nsInfo_;
8484        public boolean hasNsInfo() {
8485          return ((bitField0_ & 0x00000002) == 0x00000002);
8486        }
8487        public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto getNsInfo() {
8488          return nsInfo_;
8489        }
8490        public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProtoOrBuilder getNsInfoOrBuilder() {
8491          return nsInfo_;
8492        }
8493        
8494        private void initFields() {
8495          jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
8496          nsInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.getDefaultInstance();
8497        }
8498        private byte memoizedIsInitialized = -1;
8499        public final boolean isInitialized() {
8500          byte isInitialized = memoizedIsInitialized;
8501          if (isInitialized != -1) return isInitialized == 1;
8502          
8503          if (!hasJid()) {
8504            memoizedIsInitialized = 0;
8505            return false;
8506          }
8507          if (!hasNsInfo()) {
8508            memoizedIsInitialized = 0;
8509            return false;
8510          }
8511          if (!getJid().isInitialized()) {
8512            memoizedIsInitialized = 0;
8513            return false;
8514          }
8515          if (!getNsInfo().isInitialized()) {
8516            memoizedIsInitialized = 0;
8517            return false;
8518          }
8519          memoizedIsInitialized = 1;
8520          return true;
8521        }
8522        
8523        public void writeTo(com.google.protobuf.CodedOutputStream output)
8524                            throws java.io.IOException {
8525          getSerializedSize();
8526          if (((bitField0_ & 0x00000001) == 0x00000001)) {
8527            output.writeMessage(1, jid_);
8528          }
8529          if (((bitField0_ & 0x00000002) == 0x00000002)) {
8530            output.writeMessage(2, nsInfo_);
8531          }
8532          getUnknownFields().writeTo(output);
8533        }
8534        
8535        private int memoizedSerializedSize = -1;
8536        public int getSerializedSize() {
8537          int size = memoizedSerializedSize;
8538          if (size != -1) return size;
8539        
8540          size = 0;
8541          if (((bitField0_ & 0x00000001) == 0x00000001)) {
8542            size += com.google.protobuf.CodedOutputStream
8543              .computeMessageSize(1, jid_);
8544          }
8545          if (((bitField0_ & 0x00000002) == 0x00000002)) {
8546            size += com.google.protobuf.CodedOutputStream
8547              .computeMessageSize(2, nsInfo_);
8548          }
8549          size += getUnknownFields().getSerializedSize();
8550          memoizedSerializedSize = size;
8551          return size;
8552        }
8553        
8554        private static final long serialVersionUID = 0L;
8555        @java.lang.Override
8556        protected java.lang.Object writeReplace()
8557            throws java.io.ObjectStreamException {
8558          return super.writeReplace();
8559        }
8560        
8561        @java.lang.Override
8562        public boolean equals(final java.lang.Object obj) {
8563          if (obj == this) {
8564           return true;
8565          }
8566          if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto)) {
8567            return super.equals(obj);
8568          }
8569          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto) obj;
8570          
8571          boolean result = true;
8572          result = result && (hasJid() == other.hasJid());
8573          if (hasJid()) {
8574            result = result && getJid()
8575                .equals(other.getJid());
8576          }
8577          result = result && (hasNsInfo() == other.hasNsInfo());
8578          if (hasNsInfo()) {
8579            result = result && getNsInfo()
8580                .equals(other.getNsInfo());
8581          }
8582          result = result &&
8583              getUnknownFields().equals(other.getUnknownFields());
8584          return result;
8585        }
8586        
8587        @java.lang.Override
8588        public int hashCode() {
8589          int hash = 41;
8590          hash = (19 * hash) + getDescriptorForType().hashCode();
8591          if (hasJid()) {
8592            hash = (37 * hash) + JID_FIELD_NUMBER;
8593            hash = (53 * hash) + getJid().hashCode();
8594          }
8595          if (hasNsInfo()) {
8596            hash = (37 * hash) + NSINFO_FIELD_NUMBER;
8597            hash = (53 * hash) + getNsInfo().hashCode();
8598          }
8599          hash = (29 * hash) + getUnknownFields().hashCode();
8600          return hash;
8601        }
8602        
8603        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto parseFrom(
8604            com.google.protobuf.ByteString data)
8605            throws com.google.protobuf.InvalidProtocolBufferException {
8606          return newBuilder().mergeFrom(data).buildParsed();
8607        }
8608        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto parseFrom(
8609            com.google.protobuf.ByteString data,
8610            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
8611            throws com.google.protobuf.InvalidProtocolBufferException {
8612          return newBuilder().mergeFrom(data, extensionRegistry)
8613                   .buildParsed();
8614        }
8615        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto parseFrom(byte[] data)
8616            throws com.google.protobuf.InvalidProtocolBufferException {
8617          return newBuilder().mergeFrom(data).buildParsed();
8618        }
8619        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto parseFrom(
8620            byte[] data,
8621            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
8622            throws com.google.protobuf.InvalidProtocolBufferException {
8623          return newBuilder().mergeFrom(data, extensionRegistry)
8624                   .buildParsed();
8625        }
8626        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto parseFrom(java.io.InputStream input)
8627            throws java.io.IOException {
8628          return newBuilder().mergeFrom(input).buildParsed();
8629        }
8630        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto parseFrom(
8631            java.io.InputStream input,
8632            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
8633            throws java.io.IOException {
8634          return newBuilder().mergeFrom(input, extensionRegistry)
8635                   .buildParsed();
8636        }
8637        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto parseDelimitedFrom(java.io.InputStream input)
8638            throws java.io.IOException {
8639          Builder builder = newBuilder();
8640          if (builder.mergeDelimitedFrom(input)) {
8641            return builder.buildParsed();
8642          } else {
8643            return null;
8644          }
8645        }
8646        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto parseDelimitedFrom(
8647            java.io.InputStream input,
8648            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
8649            throws java.io.IOException {
8650          Builder builder = newBuilder();
8651          if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
8652            return builder.buildParsed();
8653          } else {
8654            return null;
8655          }
8656        }
8657        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto parseFrom(
8658            com.google.protobuf.CodedInputStream input)
8659            throws java.io.IOException {
8660          return newBuilder().mergeFrom(input).buildParsed();
8661        }
8662        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto parseFrom(
8663            com.google.protobuf.CodedInputStream input,
8664            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
8665            throws java.io.IOException {
8666          return newBuilder().mergeFrom(input, extensionRegistry)
8667                   .buildParsed();
8668        }
8669        
8670        public static Builder newBuilder() { return Builder.create(); }
8671        public Builder newBuilderForType() { return newBuilder(); }
8672        public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto prototype) {
8673          return newBuilder().mergeFrom(prototype);
8674        }
8675        public Builder toBuilder() { return newBuilder(this); }
8676        
8677        @java.lang.Override
8678        protected Builder newBuilderForType(
8679            com.google.protobuf.GeneratedMessage.BuilderParent parent) {
8680          Builder builder = new Builder(parent);
8681          return builder;
8682        }
8683        public static final class Builder extends
8684            com.google.protobuf.GeneratedMessage.Builder<Builder>
8685           implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProtoOrBuilder {
8686          public static final com.google.protobuf.Descriptors.Descriptor
8687              getDescriptor() {
8688            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_FormatRequestProto_descriptor;
8689          }
8690          
8691          protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
8692              internalGetFieldAccessorTable() {
8693            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_FormatRequestProto_fieldAccessorTable;
8694          }
8695          
8696          // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto.newBuilder()
8697          private Builder() {
8698            maybeForceBuilderInitialization();
8699          }
8700          
8701          private Builder(BuilderParent parent) {
8702            super(parent);
8703            maybeForceBuilderInitialization();
8704          }
8705          private void maybeForceBuilderInitialization() {
8706            if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
8707              getJidFieldBuilder();
8708              getNsInfoFieldBuilder();
8709            }
8710          }
8711          private static Builder create() {
8712            return new Builder();
8713          }
8714          
8715          public Builder clear() {
8716            super.clear();
8717            if (jidBuilder_ == null) {
8718              jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
8719            } else {
8720              jidBuilder_.clear();
8721            }
8722            bitField0_ = (bitField0_ & ~0x00000001);
8723            if (nsInfoBuilder_ == null) {
8724              nsInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.getDefaultInstance();
8725            } else {
8726              nsInfoBuilder_.clear();
8727            }
8728            bitField0_ = (bitField0_ & ~0x00000002);
8729            return this;
8730          }
8731          
8732          public Builder clone() {
8733            return create().mergeFrom(buildPartial());
8734          }
8735          
8736          public com.google.protobuf.Descriptors.Descriptor
8737              getDescriptorForType() {
8738            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto.getDescriptor();
8739          }
8740          
8741          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto getDefaultInstanceForType() {
8742            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto.getDefaultInstance();
8743          }
8744          
8745          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto build() {
8746            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto result = buildPartial();
8747            if (!result.isInitialized()) {
8748              throw newUninitializedMessageException(result);
8749            }
8750            return result;
8751          }
8752          
8753          private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto buildParsed()
8754              throws com.google.protobuf.InvalidProtocolBufferException {
8755            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto result = buildPartial();
8756            if (!result.isInitialized()) {
8757              throw newUninitializedMessageException(
8758                result).asInvalidProtocolBufferException();
8759            }
8760            return result;
8761          }
8762          
8763          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto buildPartial() {
8764            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto(this);
8765            int from_bitField0_ = bitField0_;
8766            int to_bitField0_ = 0;
8767            if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
8768              to_bitField0_ |= 0x00000001;
8769            }
8770            if (jidBuilder_ == null) {
8771              result.jid_ = jid_;
8772            } else {
8773              result.jid_ = jidBuilder_.build();
8774            }
8775            if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
8776              to_bitField0_ |= 0x00000002;
8777            }
8778            if (nsInfoBuilder_ == null) {
8779              result.nsInfo_ = nsInfo_;
8780            } else {
8781              result.nsInfo_ = nsInfoBuilder_.build();
8782            }
8783            result.bitField0_ = to_bitField0_;
8784            onBuilt();
8785            return result;
8786          }
8787          
8788          public Builder mergeFrom(com.google.protobuf.Message other) {
8789            if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto) {
8790              return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto)other);
8791            } else {
8792              super.mergeFrom(other);
8793              return this;
8794            }
8795          }
8796          
8797          public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto other) {
8798            if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto.getDefaultInstance()) return this;
8799            if (other.hasJid()) {
8800              mergeJid(other.getJid());
8801            }
8802            if (other.hasNsInfo()) {
8803              mergeNsInfo(other.getNsInfo());
8804            }
8805            this.mergeUnknownFields(other.getUnknownFields());
8806            return this;
8807          }
8808          
8809          public final boolean isInitialized() {
8810            if (!hasJid()) {
8811              
8812              return false;
8813            }
8814            if (!hasNsInfo()) {
8815              
8816              return false;
8817            }
8818            if (!getJid().isInitialized()) {
8819              
8820              return false;
8821            }
8822            if (!getNsInfo().isInitialized()) {
8823              
8824              return false;
8825            }
8826            return true;
8827          }
8828          
8829          public Builder mergeFrom(
8830              com.google.protobuf.CodedInputStream input,
8831              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
8832              throws java.io.IOException {
8833            com.google.protobuf.UnknownFieldSet.Builder unknownFields =
8834              com.google.protobuf.UnknownFieldSet.newBuilder(
8835                this.getUnknownFields());
8836            while (true) {
8837              int tag = input.readTag();
8838              switch (tag) {
8839                case 0:
8840                  this.setUnknownFields(unknownFields.build());
8841                  onChanged();
8842                  return this;
8843                default: {
8844                  if (!parseUnknownField(input, unknownFields,
8845                                         extensionRegistry, tag)) {
8846                    this.setUnknownFields(unknownFields.build());
8847                    onChanged();
8848                    return this;
8849                  }
8850                  break;
8851                }
8852                case 10: {
8853                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder subBuilder = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.newBuilder();
8854                  if (hasJid()) {
8855                    subBuilder.mergeFrom(getJid());
8856                  }
8857                  input.readMessage(subBuilder, extensionRegistry);
8858                  setJid(subBuilder.buildPartial());
8859                  break;
8860                }
8861                case 18: {
8862                  org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.newBuilder();
8863                  if (hasNsInfo()) {
8864                    subBuilder.mergeFrom(getNsInfo());
8865                  }
8866                  input.readMessage(subBuilder, extensionRegistry);
8867                  setNsInfo(subBuilder.buildPartial());
8868                  break;
8869                }
8870              }
8871            }
8872          }
8873          
8874          private int bitField0_;
8875          
8876          // required .hadoop.hdfs.JournalIdProto jid = 1;
8877          private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
8878          private com.google.protobuf.SingleFieldBuilder<
8879              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder> jidBuilder_;
8880          public boolean hasJid() {
8881            return ((bitField0_ & 0x00000001) == 0x00000001);
8882          }
8883          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid() {
8884            if (jidBuilder_ == null) {
8885              return jid_;
8886            } else {
8887              return jidBuilder_.getMessage();
8888            }
8889          }
8890          public Builder setJid(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto value) {
8891            if (jidBuilder_ == null) {
8892              if (value == null) {
8893                throw new NullPointerException();
8894              }
8895              jid_ = value;
8896              onChanged();
8897            } else {
8898              jidBuilder_.setMessage(value);
8899            }
8900            bitField0_ |= 0x00000001;
8901            return this;
8902          }
8903          public Builder setJid(
8904              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder builderForValue) {
8905            if (jidBuilder_ == null) {
8906              jid_ = builderForValue.build();
8907              onChanged();
8908            } else {
8909              jidBuilder_.setMessage(builderForValue.build());
8910            }
8911            bitField0_ |= 0x00000001;
8912            return this;
8913          }
8914          public Builder mergeJid(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto value) {
8915            if (jidBuilder_ == null) {
8916              if (((bitField0_ & 0x00000001) == 0x00000001) &&
8917                  jid_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance()) {
8918                jid_ =
8919                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.newBuilder(jid_).mergeFrom(value).buildPartial();
8920              } else {
8921                jid_ = value;
8922              }
8923              onChanged();
8924            } else {
8925              jidBuilder_.mergeFrom(value);
8926            }
8927            bitField0_ |= 0x00000001;
8928            return this;
8929          }
8930          public Builder clearJid() {
8931            if (jidBuilder_ == null) {
8932              jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
8933              onChanged();
8934            } else {
8935              jidBuilder_.clear();
8936            }
8937            bitField0_ = (bitField0_ & ~0x00000001);
8938            return this;
8939          }
8940          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder getJidBuilder() {
8941            bitField0_ |= 0x00000001;
8942            onChanged();
8943            return getJidFieldBuilder().getBuilder();
8944          }
8945          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder() {
8946            if (jidBuilder_ != null) {
8947              return jidBuilder_.getMessageOrBuilder();
8948            } else {
8949              return jid_;
8950            }
8951          }
8952          private com.google.protobuf.SingleFieldBuilder<
8953              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder> 
8954              getJidFieldBuilder() {
8955            if (jidBuilder_ == null) {
8956              jidBuilder_ = new com.google.protobuf.SingleFieldBuilder<
8957                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder>(
8958                      jid_,
8959                      getParentForChildren(),
8960                      isClean());
8961              jid_ = null;
8962            }
8963            return jidBuilder_;
8964          }
8965          
8966          // required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;
8967          private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto nsInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.getDefaultInstance();
8968          private com.google.protobuf.SingleFieldBuilder<
8969              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProtoOrBuilder> nsInfoBuilder_;
8970          public boolean hasNsInfo() {
8971            return ((bitField0_ & 0x00000002) == 0x00000002);
8972          }
8973          public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto getNsInfo() {
8974            if (nsInfoBuilder_ == null) {
8975              return nsInfo_;
8976            } else {
8977              return nsInfoBuilder_.getMessage();
8978            }
8979          }
8980          public Builder setNsInfo(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto value) {
8981            if (nsInfoBuilder_ == null) {
8982              if (value == null) {
8983                throw new NullPointerException();
8984              }
8985              nsInfo_ = value;
8986              onChanged();
8987            } else {
8988              nsInfoBuilder_.setMessage(value);
8989            }
8990            bitField0_ |= 0x00000002;
8991            return this;
8992          }
8993          public Builder setNsInfo(
8994              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.Builder builderForValue) {
8995            if (nsInfoBuilder_ == null) {
8996              nsInfo_ = builderForValue.build();
8997              onChanged();
8998            } else {
8999              nsInfoBuilder_.setMessage(builderForValue.build());
9000            }
9001            bitField0_ |= 0x00000002;
9002            return this;
9003          }
9004          public Builder mergeNsInfo(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto value) {
9005            if (nsInfoBuilder_ == null) {
9006              if (((bitField0_ & 0x00000002) == 0x00000002) &&
9007                  nsInfo_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.getDefaultInstance()) {
9008                nsInfo_ =
9009                  org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.newBuilder(nsInfo_).mergeFrom(value).buildPartial();
9010              } else {
9011                nsInfo_ = value;
9012              }
9013              onChanged();
9014            } else {
9015              nsInfoBuilder_.mergeFrom(value);
9016            }
9017            bitField0_ |= 0x00000002;
9018            return this;
9019          }
9020          public Builder clearNsInfo() {
9021            if (nsInfoBuilder_ == null) {
9022              nsInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.getDefaultInstance();
9023              onChanged();
9024            } else {
9025              nsInfoBuilder_.clear();
9026            }
9027            bitField0_ = (bitField0_ & ~0x00000002);
9028            return this;
9029          }
9030          public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.Builder getNsInfoBuilder() {
9031            bitField0_ |= 0x00000002;
9032            onChanged();
9033            return getNsInfoFieldBuilder().getBuilder();
9034          }
9035          public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProtoOrBuilder getNsInfoOrBuilder() {
9036            if (nsInfoBuilder_ != null) {
9037              return nsInfoBuilder_.getMessageOrBuilder();
9038            } else {
9039              return nsInfo_;
9040            }
9041          }
9042          private com.google.protobuf.SingleFieldBuilder<
9043              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProtoOrBuilder> 
9044              getNsInfoFieldBuilder() {
9045            if (nsInfoBuilder_ == null) {
9046              nsInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder<
9047                  org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProtoOrBuilder>(
9048                      nsInfo_,
9049                      getParentForChildren(),
9050                      isClean());
9051              nsInfo_ = null;
9052            }
9053            return nsInfoBuilder_;
9054          }
9055          
9056          // @@protoc_insertion_point(builder_scope:hadoop.hdfs.FormatRequestProto)
9057        }
9058        
9059        static {
9060          defaultInstance = new FormatRequestProto(true);
9061          defaultInstance.initFields();
9062        }
9063        
9064        // @@protoc_insertion_point(class_scope:hadoop.hdfs.FormatRequestProto)
9065      }
9066      
9067      public interface FormatResponseProtoOrBuilder
9068          extends com.google.protobuf.MessageOrBuilder {
9069      }
9070      public static final class FormatResponseProto extends
9071          com.google.protobuf.GeneratedMessage
9072          implements FormatResponseProtoOrBuilder {
9073        // Use FormatResponseProto.newBuilder() to construct.
9074        private FormatResponseProto(Builder builder) {
9075          super(builder);
9076        }
9077        private FormatResponseProto(boolean noInit) {}
9078        
9079        private static final FormatResponseProto defaultInstance;
9080        public static FormatResponseProto getDefaultInstance() {
9081          return defaultInstance;
9082        }
9083        
9084        public FormatResponseProto getDefaultInstanceForType() {
9085          return defaultInstance;
9086        }
9087        
9088        public static final com.google.protobuf.Descriptors.Descriptor
9089            getDescriptor() {
9090          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_FormatResponseProto_descriptor;
9091        }
9092        
9093        protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
9094            internalGetFieldAccessorTable() {
9095          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_FormatResponseProto_fieldAccessorTable;
9096        }
9097        
9098        private void initFields() {
9099        }
9100        private byte memoizedIsInitialized = -1;
9101        public final boolean isInitialized() {
9102          byte isInitialized = memoizedIsInitialized;
9103          if (isInitialized != -1) return isInitialized == 1;
9104          
9105          memoizedIsInitialized = 1;
9106          return true;
9107        }
9108        
9109        public void writeTo(com.google.protobuf.CodedOutputStream output)
9110                            throws java.io.IOException {
9111          getSerializedSize();
9112          getUnknownFields().writeTo(output);
9113        }
9114        
9115        private int memoizedSerializedSize = -1;
9116        public int getSerializedSize() {
9117          int size = memoizedSerializedSize;
9118          if (size != -1) return size;
9119        
9120          size = 0;
9121          size += getUnknownFields().getSerializedSize();
9122          memoizedSerializedSize = size;
9123          return size;
9124        }
9125        
9126        private static final long serialVersionUID = 0L;
9127        @java.lang.Override
9128        protected java.lang.Object writeReplace()
9129            throws java.io.ObjectStreamException {
9130          return super.writeReplace();
9131        }
9132        
9133        @java.lang.Override
9134        public boolean equals(final java.lang.Object obj) {
9135          if (obj == this) {
9136           return true;
9137          }
9138          if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto)) {
9139            return super.equals(obj);
9140          }
9141          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto) obj;
9142          
9143          boolean result = true;
9144          result = result &&
9145              getUnknownFields().equals(other.getUnknownFields());
9146          return result;
9147        }
9148        
9149        @java.lang.Override
9150        public int hashCode() {
9151          int hash = 41;
9152          hash = (19 * hash) + getDescriptorForType().hashCode();
9153          hash = (29 * hash) + getUnknownFields().hashCode();
9154          return hash;
9155        }
9156        
9157        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto parseFrom(
9158            com.google.protobuf.ByteString data)
9159            throws com.google.protobuf.InvalidProtocolBufferException {
9160          return newBuilder().mergeFrom(data).buildParsed();
9161        }
9162        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto parseFrom(
9163            com.google.protobuf.ByteString data,
9164            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
9165            throws com.google.protobuf.InvalidProtocolBufferException {
9166          return newBuilder().mergeFrom(data, extensionRegistry)
9167                   .buildParsed();
9168        }
9169        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto parseFrom(byte[] data)
9170            throws com.google.protobuf.InvalidProtocolBufferException {
9171          return newBuilder().mergeFrom(data).buildParsed();
9172        }
9173        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto parseFrom(
9174            byte[] data,
9175            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
9176            throws com.google.protobuf.InvalidProtocolBufferException {
9177          return newBuilder().mergeFrom(data, extensionRegistry)
9178                   .buildParsed();
9179        }
9180        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto parseFrom(java.io.InputStream input)
9181            throws java.io.IOException {
9182          return newBuilder().mergeFrom(input).buildParsed();
9183        }
9184        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto parseFrom(
9185            java.io.InputStream input,
9186            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
9187            throws java.io.IOException {
9188          return newBuilder().mergeFrom(input, extensionRegistry)
9189                   .buildParsed();
9190        }
9191        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto parseDelimitedFrom(java.io.InputStream input)
9192            throws java.io.IOException {
9193          Builder builder = newBuilder();
9194          if (builder.mergeDelimitedFrom(input)) {
9195            return builder.buildParsed();
9196          } else {
9197            return null;
9198          }
9199        }
9200        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto parseDelimitedFrom(
9201            java.io.InputStream input,
9202            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
9203            throws java.io.IOException {
9204          Builder builder = newBuilder();
9205          if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
9206            return builder.buildParsed();
9207          } else {
9208            return null;
9209          }
9210        }
9211        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto parseFrom(
9212            com.google.protobuf.CodedInputStream input)
9213            throws java.io.IOException {
9214          return newBuilder().mergeFrom(input).buildParsed();
9215        }
9216        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto parseFrom(
9217            com.google.protobuf.CodedInputStream input,
9218            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
9219            throws java.io.IOException {
9220          return newBuilder().mergeFrom(input, extensionRegistry)
9221                   .buildParsed();
9222        }
9223        
9224        public static Builder newBuilder() { return Builder.create(); }
9225        public Builder newBuilderForType() { return newBuilder(); }
9226        public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto prototype) {
9227          return newBuilder().mergeFrom(prototype);
9228        }
9229        public Builder toBuilder() { return newBuilder(this); }
9230        
9231        @java.lang.Override
9232        protected Builder newBuilderForType(
9233            com.google.protobuf.GeneratedMessage.BuilderParent parent) {
9234          Builder builder = new Builder(parent);
9235          return builder;
9236        }
9237        public static final class Builder extends
9238            com.google.protobuf.GeneratedMessage.Builder<Builder>
9239           implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProtoOrBuilder {
9240          public static final com.google.protobuf.Descriptors.Descriptor
9241              getDescriptor() {
9242            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_FormatResponseProto_descriptor;
9243          }
9244          
9245          protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
9246              internalGetFieldAccessorTable() {
9247            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_FormatResponseProto_fieldAccessorTable;
9248          }
9249          
9250          // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto.newBuilder()
9251          private Builder() {
9252            maybeForceBuilderInitialization();
9253          }
9254          
9255          private Builder(BuilderParent parent) {
9256            super(parent);
9257            maybeForceBuilderInitialization();
9258          }
9259          private void maybeForceBuilderInitialization() {
9260            if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
9261            }
9262          }
9263          private static Builder create() {
9264            return new Builder();
9265          }
9266          
9267          public Builder clear() {
9268            super.clear();
9269            return this;
9270          }
9271          
9272          public Builder clone() {
9273            return create().mergeFrom(buildPartial());
9274          }
9275          
9276          public com.google.protobuf.Descriptors.Descriptor
9277              getDescriptorForType() {
9278            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto.getDescriptor();
9279          }
9280          
9281          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto getDefaultInstanceForType() {
9282            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto.getDefaultInstance();
9283          }
9284          
9285          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto build() {
9286            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto result = buildPartial();
9287            if (!result.isInitialized()) {
9288              throw newUninitializedMessageException(result);
9289            }
9290            return result;
9291          }
9292          
9293          private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto buildParsed()
9294              throws com.google.protobuf.InvalidProtocolBufferException {
9295            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto result = buildPartial();
9296            if (!result.isInitialized()) {
9297              throw newUninitializedMessageException(
9298                result).asInvalidProtocolBufferException();
9299            }
9300            return result;
9301          }
9302          
9303          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto buildPartial() {
9304            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto(this);
9305            onBuilt();
9306            return result;
9307          }
9308          
9309          public Builder mergeFrom(com.google.protobuf.Message other) {
9310            if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto) {
9311              return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto)other);
9312            } else {
9313              super.mergeFrom(other);
9314              return this;
9315            }
9316          }
9317          
9318          public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto other) {
9319            if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto.getDefaultInstance()) return this;
9320            this.mergeUnknownFields(other.getUnknownFields());
9321            return this;
9322          }
9323          
9324          public final boolean isInitialized() {
9325            return true;
9326          }
9327          
9328          public Builder mergeFrom(
9329              com.google.protobuf.CodedInputStream input,
9330              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
9331              throws java.io.IOException {
9332            com.google.protobuf.UnknownFieldSet.Builder unknownFields =
9333              com.google.protobuf.UnknownFieldSet.newBuilder(
9334                this.getUnknownFields());
9335            while (true) {
9336              int tag = input.readTag();
9337              switch (tag) {
9338                case 0:
9339                  this.setUnknownFields(unknownFields.build());
9340                  onChanged();
9341                  return this;
9342                default: {
9343                  if (!parseUnknownField(input, unknownFields,
9344                                         extensionRegistry, tag)) {
9345                    this.setUnknownFields(unknownFields.build());
9346                    onChanged();
9347                    return this;
9348                  }
9349                  break;
9350                }
9351              }
9352            }
9353          }
9354          
9355          
9356          // @@protoc_insertion_point(builder_scope:hadoop.hdfs.FormatResponseProto)
9357        }
9358        
9359        static {
9360          defaultInstance = new FormatResponseProto(true);
9361          defaultInstance.initFields();
9362        }
9363        
9364        // @@protoc_insertion_point(class_scope:hadoop.hdfs.FormatResponseProto)
9365      }
9366      
9367      public interface NewEpochRequestProtoOrBuilder
9368          extends com.google.protobuf.MessageOrBuilder {
9369        
9370        // required .hadoop.hdfs.JournalIdProto jid = 1;
9371        boolean hasJid();
9372        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid();
9373        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder();
9374        
9375        // required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;
9376        boolean hasNsInfo();
9377        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto getNsInfo();
9378        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProtoOrBuilder getNsInfoOrBuilder();
9379        
9380        // required uint64 epoch = 3;
9381        boolean hasEpoch();
9382        long getEpoch();
9383      }
9384      public static final class NewEpochRequestProto extends
9385          com.google.protobuf.GeneratedMessage
9386          implements NewEpochRequestProtoOrBuilder {
9387        // Use NewEpochRequestProto.newBuilder() to construct.
9388        private NewEpochRequestProto(Builder builder) {
9389          super(builder);
9390        }
9391        private NewEpochRequestProto(boolean noInit) {}
9392        
9393        private static final NewEpochRequestProto defaultInstance;
9394        public static NewEpochRequestProto getDefaultInstance() {
9395          return defaultInstance;
9396        }
9397        
9398        public NewEpochRequestProto getDefaultInstanceForType() {
9399          return defaultInstance;
9400        }
9401        
9402        public static final com.google.protobuf.Descriptors.Descriptor
9403            getDescriptor() {
9404          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_NewEpochRequestProto_descriptor;
9405        }
9406        
9407        protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
9408            internalGetFieldAccessorTable() {
9409          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_NewEpochRequestProto_fieldAccessorTable;
9410        }
9411        
9412        private int bitField0_;
9413        // required .hadoop.hdfs.JournalIdProto jid = 1;
9414        public static final int JID_FIELD_NUMBER = 1;
9415        private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto jid_;
9416        public boolean hasJid() {
9417          return ((bitField0_ & 0x00000001) == 0x00000001);
9418        }
9419        public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid() {
9420          return jid_;
9421        }
9422        public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder() {
9423          return jid_;
9424        }
9425        
9426        // required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;
9427        public static final int NSINFO_FIELD_NUMBER = 2;
9428        private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto nsInfo_;
9429        public boolean hasNsInfo() {
9430          return ((bitField0_ & 0x00000002) == 0x00000002);
9431        }
9432        public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto getNsInfo() {
9433          return nsInfo_;
9434        }
9435        public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProtoOrBuilder getNsInfoOrBuilder() {
9436          return nsInfo_;
9437        }
9438        
9439        // required uint64 epoch = 3;
9440        public static final int EPOCH_FIELD_NUMBER = 3;
9441        private long epoch_;
9442        public boolean hasEpoch() {
9443          return ((bitField0_ & 0x00000004) == 0x00000004);
9444        }
9445        public long getEpoch() {
9446          return epoch_;
9447        }
9448        
9449        private void initFields() {
9450          jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
9451          nsInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.getDefaultInstance();
9452          epoch_ = 0L;
9453        }
9454        private byte memoizedIsInitialized = -1;
9455        public final boolean isInitialized() {
9456          byte isInitialized = memoizedIsInitialized;
9457          if (isInitialized != -1) return isInitialized == 1;
9458          
9459          if (!hasJid()) {
9460            memoizedIsInitialized = 0;
9461            return false;
9462          }
9463          if (!hasNsInfo()) {
9464            memoizedIsInitialized = 0;
9465            return false;
9466          }
9467          if (!hasEpoch()) {
9468            memoizedIsInitialized = 0;
9469            return false;
9470          }
9471          if (!getJid().isInitialized()) {
9472            memoizedIsInitialized = 0;
9473            return false;
9474          }
9475          if (!getNsInfo().isInitialized()) {
9476            memoizedIsInitialized = 0;
9477            return false;
9478          }
9479          memoizedIsInitialized = 1;
9480          return true;
9481        }
9482        
9483        public void writeTo(com.google.protobuf.CodedOutputStream output)
9484                            throws java.io.IOException {
9485          getSerializedSize();
9486          if (((bitField0_ & 0x00000001) == 0x00000001)) {
9487            output.writeMessage(1, jid_);
9488          }
9489          if (((bitField0_ & 0x00000002) == 0x00000002)) {
9490            output.writeMessage(2, nsInfo_);
9491          }
9492          if (((bitField0_ & 0x00000004) == 0x00000004)) {
9493            output.writeUInt64(3, epoch_);
9494          }
9495          getUnknownFields().writeTo(output);
9496        }
9497        
9498        private int memoizedSerializedSize = -1;
9499        public int getSerializedSize() {
9500          int size = memoizedSerializedSize;
9501          if (size != -1) return size;
9502        
9503          size = 0;
9504          if (((bitField0_ & 0x00000001) == 0x00000001)) {
9505            size += com.google.protobuf.CodedOutputStream
9506              .computeMessageSize(1, jid_);
9507          }
9508          if (((bitField0_ & 0x00000002) == 0x00000002)) {
9509            size += com.google.protobuf.CodedOutputStream
9510              .computeMessageSize(2, nsInfo_);
9511          }
9512          if (((bitField0_ & 0x00000004) == 0x00000004)) {
9513            size += com.google.protobuf.CodedOutputStream
9514              .computeUInt64Size(3, epoch_);
9515          }
9516          size += getUnknownFields().getSerializedSize();
9517          memoizedSerializedSize = size;
9518          return size;
9519        }
9520        
9521        private static final long serialVersionUID = 0L;
9522        @java.lang.Override
9523        protected java.lang.Object writeReplace()
9524            throws java.io.ObjectStreamException {
9525          return super.writeReplace();
9526        }
9527        
9528        @java.lang.Override
9529        public boolean equals(final java.lang.Object obj) {
9530          if (obj == this) {
9531           return true;
9532          }
9533          if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto)) {
9534            return super.equals(obj);
9535          }
9536          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto) obj;
9537          
9538          boolean result = true;
9539          result = result && (hasJid() == other.hasJid());
9540          if (hasJid()) {
9541            result = result && getJid()
9542                .equals(other.getJid());
9543          }
9544          result = result && (hasNsInfo() == other.hasNsInfo());
9545          if (hasNsInfo()) {
9546            result = result && getNsInfo()
9547                .equals(other.getNsInfo());
9548          }
9549          result = result && (hasEpoch() == other.hasEpoch());
9550          if (hasEpoch()) {
9551            result = result && (getEpoch()
9552                == other.getEpoch());
9553          }
9554          result = result &&
9555              getUnknownFields().equals(other.getUnknownFields());
9556          return result;
9557        }
9558        
9559        @java.lang.Override
9560        public int hashCode() {
9561          int hash = 41;
9562          hash = (19 * hash) + getDescriptorForType().hashCode();
9563          if (hasJid()) {
9564            hash = (37 * hash) + JID_FIELD_NUMBER;
9565            hash = (53 * hash) + getJid().hashCode();
9566          }
9567          if (hasNsInfo()) {
9568            hash = (37 * hash) + NSINFO_FIELD_NUMBER;
9569            hash = (53 * hash) + getNsInfo().hashCode();
9570          }
9571          if (hasEpoch()) {
9572            hash = (37 * hash) + EPOCH_FIELD_NUMBER;
9573            hash = (53 * hash) + hashLong(getEpoch());
9574          }
9575          hash = (29 * hash) + getUnknownFields().hashCode();
9576          return hash;
9577        }
9578        
9579        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto parseFrom(
9580            com.google.protobuf.ByteString data)
9581            throws com.google.protobuf.InvalidProtocolBufferException {
9582          return newBuilder().mergeFrom(data).buildParsed();
9583        }
9584        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto parseFrom(
9585            com.google.protobuf.ByteString data,
9586            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
9587            throws com.google.protobuf.InvalidProtocolBufferException {
9588          return newBuilder().mergeFrom(data, extensionRegistry)
9589                   .buildParsed();
9590        }
9591        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto parseFrom(byte[] data)
9592            throws com.google.protobuf.InvalidProtocolBufferException {
9593          return newBuilder().mergeFrom(data).buildParsed();
9594        }
9595        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto parseFrom(
9596            byte[] data,
9597            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
9598            throws com.google.protobuf.InvalidProtocolBufferException {
9599          return newBuilder().mergeFrom(data, extensionRegistry)
9600                   .buildParsed();
9601        }
9602        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto parseFrom(java.io.InputStream input)
9603            throws java.io.IOException {
9604          return newBuilder().mergeFrom(input).buildParsed();
9605        }
9606        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto parseFrom(
9607            java.io.InputStream input,
9608            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
9609            throws java.io.IOException {
9610          return newBuilder().mergeFrom(input, extensionRegistry)
9611                   .buildParsed();
9612        }
9613        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto parseDelimitedFrom(java.io.InputStream input)
9614            throws java.io.IOException {
9615          Builder builder = newBuilder();
9616          if (builder.mergeDelimitedFrom(input)) {
9617            return builder.buildParsed();
9618          } else {
9619            return null;
9620          }
9621        }
9622        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto parseDelimitedFrom(
9623            java.io.InputStream input,
9624            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
9625            throws java.io.IOException {
9626          Builder builder = newBuilder();
9627          if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
9628            return builder.buildParsed();
9629          } else {
9630            return null;
9631          }
9632        }
9633        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto parseFrom(
9634            com.google.protobuf.CodedInputStream input)
9635            throws java.io.IOException {
9636          return newBuilder().mergeFrom(input).buildParsed();
9637        }
9638        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto parseFrom(
9639            com.google.protobuf.CodedInputStream input,
9640            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
9641            throws java.io.IOException {
9642          return newBuilder().mergeFrom(input, extensionRegistry)
9643                   .buildParsed();
9644        }
9645        
9646        public static Builder newBuilder() { return Builder.create(); }
9647        public Builder newBuilderForType() { return newBuilder(); }
9648        public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto prototype) {
9649          return newBuilder().mergeFrom(prototype);
9650        }
9651        public Builder toBuilder() { return newBuilder(this); }
9652        
9653        @java.lang.Override
9654        protected Builder newBuilderForType(
9655            com.google.protobuf.GeneratedMessage.BuilderParent parent) {
9656          Builder builder = new Builder(parent);
9657          return builder;
9658        }
9659        public static final class Builder extends
9660            com.google.protobuf.GeneratedMessage.Builder<Builder>
9661           implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProtoOrBuilder {
9662          public static final com.google.protobuf.Descriptors.Descriptor
9663              getDescriptor() {
9664            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_NewEpochRequestProto_descriptor;
9665          }
9666          
9667          protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
9668              internalGetFieldAccessorTable() {
9669            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_NewEpochRequestProto_fieldAccessorTable;
9670          }
9671          
9672          // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto.newBuilder()
9673          private Builder() {
9674            maybeForceBuilderInitialization();
9675          }
9676          
9677          private Builder(BuilderParent parent) {
9678            super(parent);
9679            maybeForceBuilderInitialization();
9680          }
9681          private void maybeForceBuilderInitialization() {
9682            if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
9683              getJidFieldBuilder();
9684              getNsInfoFieldBuilder();
9685            }
9686          }
9687          private static Builder create() {
9688            return new Builder();
9689          }
9690          
9691          public Builder clear() {
9692            super.clear();
9693            if (jidBuilder_ == null) {
9694              jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
9695            } else {
9696              jidBuilder_.clear();
9697            }
9698            bitField0_ = (bitField0_ & ~0x00000001);
9699            if (nsInfoBuilder_ == null) {
9700              nsInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.getDefaultInstance();
9701            } else {
9702              nsInfoBuilder_.clear();
9703            }
9704            bitField0_ = (bitField0_ & ~0x00000002);
9705            epoch_ = 0L;
9706            bitField0_ = (bitField0_ & ~0x00000004);
9707            return this;
9708          }
9709          
9710          public Builder clone() {
9711            return create().mergeFrom(buildPartial());
9712          }
9713          
9714          public com.google.protobuf.Descriptors.Descriptor
9715              getDescriptorForType() {
9716            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto.getDescriptor();
9717          }
9718          
9719          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto getDefaultInstanceForType() {
9720            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto.getDefaultInstance();
9721          }
9722          
9723          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto build() {
9724            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto result = buildPartial();
9725            if (!result.isInitialized()) {
9726              throw newUninitializedMessageException(result);
9727            }
9728            return result;
9729          }
9730          
9731          private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto buildParsed()
9732              throws com.google.protobuf.InvalidProtocolBufferException {
9733            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto result = buildPartial();
9734            if (!result.isInitialized()) {
9735              throw newUninitializedMessageException(
9736                result).asInvalidProtocolBufferException();
9737            }
9738            return result;
9739          }
9740          
9741          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto buildPartial() {
9742            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto(this);
9743            int from_bitField0_ = bitField0_;
9744            int to_bitField0_ = 0;
9745            if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
9746              to_bitField0_ |= 0x00000001;
9747            }
9748            if (jidBuilder_ == null) {
9749              result.jid_ = jid_;
9750            } else {
9751              result.jid_ = jidBuilder_.build();
9752            }
9753            if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
9754              to_bitField0_ |= 0x00000002;
9755            }
9756            if (nsInfoBuilder_ == null) {
9757              result.nsInfo_ = nsInfo_;
9758            } else {
9759              result.nsInfo_ = nsInfoBuilder_.build();
9760            }
9761            if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
9762              to_bitField0_ |= 0x00000004;
9763            }
9764            result.epoch_ = epoch_;
9765            result.bitField0_ = to_bitField0_;
9766            onBuilt();
9767            return result;
9768          }
9769          
9770          public Builder mergeFrom(com.google.protobuf.Message other) {
9771            if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto) {
9772              return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto)other);
9773            } else {
9774              super.mergeFrom(other);
9775              return this;
9776            }
9777          }
9778          
9779          public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto other) {
9780            if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto.getDefaultInstance()) return this;
9781            if (other.hasJid()) {
9782              mergeJid(other.getJid());
9783            }
9784            if (other.hasNsInfo()) {
9785              mergeNsInfo(other.getNsInfo());
9786            }
9787            if (other.hasEpoch()) {
9788              setEpoch(other.getEpoch());
9789            }
9790            this.mergeUnknownFields(other.getUnknownFields());
9791            return this;
9792          }
9793          
9794          public final boolean isInitialized() {
9795            if (!hasJid()) {
9796              
9797              return false;
9798            }
9799            if (!hasNsInfo()) {
9800              
9801              return false;
9802            }
9803            if (!hasEpoch()) {
9804              
9805              return false;
9806            }
9807            if (!getJid().isInitialized()) {
9808              
9809              return false;
9810            }
9811            if (!getNsInfo().isInitialized()) {
9812              
9813              return false;
9814            }
9815            return true;
9816          }
9817          
9818          public Builder mergeFrom(
9819              com.google.protobuf.CodedInputStream input,
9820              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
9821              throws java.io.IOException {
9822            com.google.protobuf.UnknownFieldSet.Builder unknownFields =
9823              com.google.protobuf.UnknownFieldSet.newBuilder(
9824                this.getUnknownFields());
9825            while (true) {
9826              int tag = input.readTag();
9827              switch (tag) {
9828                case 0:
9829                  this.setUnknownFields(unknownFields.build());
9830                  onChanged();
9831                  return this;
9832                default: {
9833                  if (!parseUnknownField(input, unknownFields,
9834                                         extensionRegistry, tag)) {
9835                    this.setUnknownFields(unknownFields.build());
9836                    onChanged();
9837                    return this;
9838                  }
9839                  break;
9840                }
9841                case 10: {
9842                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder subBuilder = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.newBuilder();
9843                  if (hasJid()) {
9844                    subBuilder.mergeFrom(getJid());
9845                  }
9846                  input.readMessage(subBuilder, extensionRegistry);
9847                  setJid(subBuilder.buildPartial());
9848                  break;
9849                }
9850                case 18: {
9851                  org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.newBuilder();
9852                  if (hasNsInfo()) {
9853                    subBuilder.mergeFrom(getNsInfo());
9854                  }
9855                  input.readMessage(subBuilder, extensionRegistry);
9856                  setNsInfo(subBuilder.buildPartial());
9857                  break;
9858                }
9859                case 24: {
9860                  bitField0_ |= 0x00000004;
9861                  epoch_ = input.readUInt64();
9862                  break;
9863                }
9864              }
9865            }
9866          }
9867          
9868          private int bitField0_;
9869          
9870          // required .hadoop.hdfs.JournalIdProto jid = 1;
9871          private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
9872          private com.google.protobuf.SingleFieldBuilder<
9873              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder> jidBuilder_;
9874          public boolean hasJid() {
9875            return ((bitField0_ & 0x00000001) == 0x00000001);
9876          }
9877          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid() {
9878            if (jidBuilder_ == null) {
9879              return jid_;
9880            } else {
9881              return jidBuilder_.getMessage();
9882            }
9883          }
9884          public Builder setJid(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto value) {
9885            if (jidBuilder_ == null) {
9886              if (value == null) {
9887                throw new NullPointerException();
9888              }
9889              jid_ = value;
9890              onChanged();
9891            } else {
9892              jidBuilder_.setMessage(value);
9893            }
9894            bitField0_ |= 0x00000001;
9895            return this;
9896          }
9897          public Builder setJid(
9898              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder builderForValue) {
9899            if (jidBuilder_ == null) {
9900              jid_ = builderForValue.build();
9901              onChanged();
9902            } else {
9903              jidBuilder_.setMessage(builderForValue.build());
9904            }
9905            bitField0_ |= 0x00000001;
9906            return this;
9907          }
9908          public Builder mergeJid(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto value) {
9909            if (jidBuilder_ == null) {
9910              if (((bitField0_ & 0x00000001) == 0x00000001) &&
9911                  jid_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance()) {
9912                jid_ =
9913                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.newBuilder(jid_).mergeFrom(value).buildPartial();
9914              } else {
9915                jid_ = value;
9916              }
9917              onChanged();
9918            } else {
9919              jidBuilder_.mergeFrom(value);
9920            }
9921            bitField0_ |= 0x00000001;
9922            return this;
9923          }
9924          public Builder clearJid() {
9925            if (jidBuilder_ == null) {
9926              jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
9927              onChanged();
9928            } else {
9929              jidBuilder_.clear();
9930            }
9931            bitField0_ = (bitField0_ & ~0x00000001);
9932            return this;
9933          }
9934          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder getJidBuilder() {
9935            bitField0_ |= 0x00000001;
9936            onChanged();
9937            return getJidFieldBuilder().getBuilder();
9938          }
9939          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder() {
9940            if (jidBuilder_ != null) {
9941              return jidBuilder_.getMessageOrBuilder();
9942            } else {
9943              return jid_;
9944            }
9945          }
9946          private com.google.protobuf.SingleFieldBuilder<
9947              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder> 
9948              getJidFieldBuilder() {
9949            if (jidBuilder_ == null) {
9950              jidBuilder_ = new com.google.protobuf.SingleFieldBuilder<
9951                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder>(
9952                      jid_,
9953                      getParentForChildren(),
9954                      isClean());
9955              jid_ = null;
9956            }
9957            return jidBuilder_;
9958          }
9959          
9960          // required .hadoop.hdfs.NamespaceInfoProto nsInfo = 2;
9961          private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto nsInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.getDefaultInstance();
9962          private com.google.protobuf.SingleFieldBuilder<
9963              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProtoOrBuilder> nsInfoBuilder_;
9964          public boolean hasNsInfo() {
9965            return ((bitField0_ & 0x00000002) == 0x00000002);
9966          }
9967          public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto getNsInfo() {
9968            if (nsInfoBuilder_ == null) {
9969              return nsInfo_;
9970            } else {
9971              return nsInfoBuilder_.getMessage();
9972            }
9973          }
9974          public Builder setNsInfo(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto value) {
9975            if (nsInfoBuilder_ == null) {
9976              if (value == null) {
9977                throw new NullPointerException();
9978              }
9979              nsInfo_ = value;
9980              onChanged();
9981            } else {
9982              nsInfoBuilder_.setMessage(value);
9983            }
9984            bitField0_ |= 0x00000002;
9985            return this;
9986          }
9987          public Builder setNsInfo(
9988              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.Builder builderForValue) {
9989            if (nsInfoBuilder_ == null) {
9990              nsInfo_ = builderForValue.build();
9991              onChanged();
9992            } else {
9993              nsInfoBuilder_.setMessage(builderForValue.build());
9994            }
9995            bitField0_ |= 0x00000002;
9996            return this;
9997          }
9998          public Builder mergeNsInfo(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto value) {
9999            if (nsInfoBuilder_ == null) {
10000              if (((bitField0_ & 0x00000002) == 0x00000002) &&
10001                  nsInfo_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.getDefaultInstance()) {
10002                nsInfo_ =
10003                  org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.newBuilder(nsInfo_).mergeFrom(value).buildPartial();
10004              } else {
10005                nsInfo_ = value;
10006              }
10007              onChanged();
10008            } else {
10009              nsInfoBuilder_.mergeFrom(value);
10010            }
10011            bitField0_ |= 0x00000002;
10012            return this;
10013          }
10014          public Builder clearNsInfo() {
10015            if (nsInfoBuilder_ == null) {
10016              nsInfo_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.getDefaultInstance();
10017              onChanged();
10018            } else {
10019              nsInfoBuilder_.clear();
10020            }
10021            bitField0_ = (bitField0_ & ~0x00000002);
10022            return this;
10023          }
10024          public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.Builder getNsInfoBuilder() {
10025            bitField0_ |= 0x00000002;
10026            onChanged();
10027            return getNsInfoFieldBuilder().getBuilder();
10028          }
10029          public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProtoOrBuilder getNsInfoOrBuilder() {
10030            if (nsInfoBuilder_ != null) {
10031              return nsInfoBuilder_.getMessageOrBuilder();
10032            } else {
10033              return nsInfo_;
10034            }
10035          }
10036          private com.google.protobuf.SingleFieldBuilder<
10037              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProtoOrBuilder> 
10038              getNsInfoFieldBuilder() {
10039            if (nsInfoBuilder_ == null) {
10040              nsInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder<
10041                  org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.NamespaceInfoProtoOrBuilder>(
10042                      nsInfo_,
10043                      getParentForChildren(),
10044                      isClean());
10045              nsInfo_ = null;
10046            }
10047            return nsInfoBuilder_;
10048          }
10049          
10050          // required uint64 epoch = 3;
10051          private long epoch_ ;
10052          public boolean hasEpoch() {
10053            return ((bitField0_ & 0x00000004) == 0x00000004);
10054          }
10055          public long getEpoch() {
10056            return epoch_;
10057          }
10058          public Builder setEpoch(long value) {
10059            bitField0_ |= 0x00000004;
10060            epoch_ = value;
10061            onChanged();
10062            return this;
10063          }
10064          public Builder clearEpoch() {
10065            bitField0_ = (bitField0_ & ~0x00000004);
10066            epoch_ = 0L;
10067            onChanged();
10068            return this;
10069          }
10070          
10071          // @@protoc_insertion_point(builder_scope:hadoop.hdfs.NewEpochRequestProto)
10072        }
10073        
10074        static {
10075          defaultInstance = new NewEpochRequestProto(true);
10076          defaultInstance.initFields();
10077        }
10078        
10079        // @@protoc_insertion_point(class_scope:hadoop.hdfs.NewEpochRequestProto)
10080      }
10081      
10082      public interface NewEpochResponseProtoOrBuilder
10083          extends com.google.protobuf.MessageOrBuilder {
10084        
10085        // optional uint64 lastSegmentTxId = 1;
10086        boolean hasLastSegmentTxId();
10087        long getLastSegmentTxId();
10088      }
10089      public static final class NewEpochResponseProto extends
10090          com.google.protobuf.GeneratedMessage
10091          implements NewEpochResponseProtoOrBuilder {
10092        // Use NewEpochResponseProto.newBuilder() to construct.
10093        private NewEpochResponseProto(Builder builder) {
10094          super(builder);
10095        }
10096        private NewEpochResponseProto(boolean noInit) {}
10097        
10098        private static final NewEpochResponseProto defaultInstance;
10099        public static NewEpochResponseProto getDefaultInstance() {
10100          return defaultInstance;
10101        }
10102        
10103        public NewEpochResponseProto getDefaultInstanceForType() {
10104          return defaultInstance;
10105        }
10106        
10107        public static final com.google.protobuf.Descriptors.Descriptor
10108            getDescriptor() {
10109          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_NewEpochResponseProto_descriptor;
10110        }
10111        
10112        protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
10113            internalGetFieldAccessorTable() {
10114          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_NewEpochResponseProto_fieldAccessorTable;
10115        }
10116        
10117        private int bitField0_;
10118        // optional uint64 lastSegmentTxId = 1;
10119        public static final int LASTSEGMENTTXID_FIELD_NUMBER = 1;
10120        private long lastSegmentTxId_;
10121        public boolean hasLastSegmentTxId() {
10122          return ((bitField0_ & 0x00000001) == 0x00000001);
10123        }
10124        public long getLastSegmentTxId() {
10125          return lastSegmentTxId_;
10126        }
10127        
10128        private void initFields() {
10129          lastSegmentTxId_ = 0L;
10130        }
10131        private byte memoizedIsInitialized = -1;
10132        public final boolean isInitialized() {
10133          byte isInitialized = memoizedIsInitialized;
10134          if (isInitialized != -1) return isInitialized == 1;
10135          
10136          memoizedIsInitialized = 1;
10137          return true;
10138        }
10139        
10140        public void writeTo(com.google.protobuf.CodedOutputStream output)
10141                            throws java.io.IOException {
10142          getSerializedSize();
10143          if (((bitField0_ & 0x00000001) == 0x00000001)) {
10144            output.writeUInt64(1, lastSegmentTxId_);
10145          }
10146          getUnknownFields().writeTo(output);
10147        }
10148        
10149        private int memoizedSerializedSize = -1;
10150        public int getSerializedSize() {
10151          int size = memoizedSerializedSize;
10152          if (size != -1) return size;
10153        
10154          size = 0;
10155          if (((bitField0_ & 0x00000001) == 0x00000001)) {
10156            size += com.google.protobuf.CodedOutputStream
10157              .computeUInt64Size(1, lastSegmentTxId_);
10158          }
10159          size += getUnknownFields().getSerializedSize();
10160          memoizedSerializedSize = size;
10161          return size;
10162        }
10163        
10164        private static final long serialVersionUID = 0L;
10165        @java.lang.Override
10166        protected java.lang.Object writeReplace()
10167            throws java.io.ObjectStreamException {
10168          return super.writeReplace();
10169        }
10170        
10171        @java.lang.Override
10172        public boolean equals(final java.lang.Object obj) {
10173          if (obj == this) {
10174           return true;
10175          }
10176          if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto)) {
10177            return super.equals(obj);
10178          }
10179          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto) obj;
10180          
10181          boolean result = true;
10182          result = result && (hasLastSegmentTxId() == other.hasLastSegmentTxId());
10183          if (hasLastSegmentTxId()) {
10184            result = result && (getLastSegmentTxId()
10185                == other.getLastSegmentTxId());
10186          }
10187          result = result &&
10188              getUnknownFields().equals(other.getUnknownFields());
10189          return result;
10190        }
10191        
10192        @java.lang.Override
10193        public int hashCode() {
10194          int hash = 41;
10195          hash = (19 * hash) + getDescriptorForType().hashCode();
10196          if (hasLastSegmentTxId()) {
10197            hash = (37 * hash) + LASTSEGMENTTXID_FIELD_NUMBER;
10198            hash = (53 * hash) + hashLong(getLastSegmentTxId());
10199          }
10200          hash = (29 * hash) + getUnknownFields().hashCode();
10201          return hash;
10202        }
10203        
10204        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto parseFrom(
10205            com.google.protobuf.ByteString data)
10206            throws com.google.protobuf.InvalidProtocolBufferException {
10207          return newBuilder().mergeFrom(data).buildParsed();
10208        }
10209        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto parseFrom(
10210            com.google.protobuf.ByteString data,
10211            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
10212            throws com.google.protobuf.InvalidProtocolBufferException {
10213          return newBuilder().mergeFrom(data, extensionRegistry)
10214                   .buildParsed();
10215        }
10216        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto parseFrom(byte[] data)
10217            throws com.google.protobuf.InvalidProtocolBufferException {
10218          return newBuilder().mergeFrom(data).buildParsed();
10219        }
10220        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto parseFrom(
10221            byte[] data,
10222            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
10223            throws com.google.protobuf.InvalidProtocolBufferException {
10224          return newBuilder().mergeFrom(data, extensionRegistry)
10225                   .buildParsed();
10226        }
10227        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto parseFrom(java.io.InputStream input)
10228            throws java.io.IOException {
10229          return newBuilder().mergeFrom(input).buildParsed();
10230        }
10231        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto parseFrom(
10232            java.io.InputStream input,
10233            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
10234            throws java.io.IOException {
10235          return newBuilder().mergeFrom(input, extensionRegistry)
10236                   .buildParsed();
10237        }
10238        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto parseDelimitedFrom(java.io.InputStream input)
10239            throws java.io.IOException {
10240          Builder builder = newBuilder();
10241          if (builder.mergeDelimitedFrom(input)) {
10242            return builder.buildParsed();
10243          } else {
10244            return null;
10245          }
10246        }
10247        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto parseDelimitedFrom(
10248            java.io.InputStream input,
10249            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
10250            throws java.io.IOException {
10251          Builder builder = newBuilder();
10252          if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
10253            return builder.buildParsed();
10254          } else {
10255            return null;
10256          }
10257        }
10258        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto parseFrom(
10259            com.google.protobuf.CodedInputStream input)
10260            throws java.io.IOException {
10261          return newBuilder().mergeFrom(input).buildParsed();
10262        }
10263        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto parseFrom(
10264            com.google.protobuf.CodedInputStream input,
10265            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
10266            throws java.io.IOException {
10267          return newBuilder().mergeFrom(input, extensionRegistry)
10268                   .buildParsed();
10269        }
10270        
10271        public static Builder newBuilder() { return Builder.create(); }
10272        public Builder newBuilderForType() { return newBuilder(); }
10273        public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto prototype) {
10274          return newBuilder().mergeFrom(prototype);
10275        }
10276        public Builder toBuilder() { return newBuilder(this); }
10277        
10278        @java.lang.Override
10279        protected Builder newBuilderForType(
10280            com.google.protobuf.GeneratedMessage.BuilderParent parent) {
10281          Builder builder = new Builder(parent);
10282          return builder;
10283        }
10284        public static final class Builder extends
10285            com.google.protobuf.GeneratedMessage.Builder<Builder>
10286           implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProtoOrBuilder {
10287          public static final com.google.protobuf.Descriptors.Descriptor
10288              getDescriptor() {
10289            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_NewEpochResponseProto_descriptor;
10290          }
10291          
10292          protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
10293              internalGetFieldAccessorTable() {
10294            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_NewEpochResponseProto_fieldAccessorTable;
10295          }
10296          
10297          // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto.newBuilder()
10298          private Builder() {
10299            maybeForceBuilderInitialization();
10300          }
10301          
10302          private Builder(BuilderParent parent) {
10303            super(parent);
10304            maybeForceBuilderInitialization();
10305          }
10306          private void maybeForceBuilderInitialization() {
10307            if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
10308            }
10309          }
10310          private static Builder create() {
10311            return new Builder();
10312          }
10313          
10314          public Builder clear() {
10315            super.clear();
10316            lastSegmentTxId_ = 0L;
10317            bitField0_ = (bitField0_ & ~0x00000001);
10318            return this;
10319          }
10320          
10321          public Builder clone() {
10322            return create().mergeFrom(buildPartial());
10323          }
10324          
10325          public com.google.protobuf.Descriptors.Descriptor
10326              getDescriptorForType() {
10327            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto.getDescriptor();
10328          }
10329          
10330          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto getDefaultInstanceForType() {
10331            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto.getDefaultInstance();
10332          }
10333          
10334          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto build() {
10335            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto result = buildPartial();
10336            if (!result.isInitialized()) {
10337              throw newUninitializedMessageException(result);
10338            }
10339            return result;
10340          }
10341          
10342          private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto buildParsed()
10343              throws com.google.protobuf.InvalidProtocolBufferException {
10344            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto result = buildPartial();
10345            if (!result.isInitialized()) {
10346              throw newUninitializedMessageException(
10347                result).asInvalidProtocolBufferException();
10348            }
10349            return result;
10350          }
10351          
10352          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto buildPartial() {
10353            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto(this);
10354            int from_bitField0_ = bitField0_;
10355            int to_bitField0_ = 0;
10356            if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
10357              to_bitField0_ |= 0x00000001;
10358            }
10359            result.lastSegmentTxId_ = lastSegmentTxId_;
10360            result.bitField0_ = to_bitField0_;
10361            onBuilt();
10362            return result;
10363          }
10364          
10365          public Builder mergeFrom(com.google.protobuf.Message other) {
10366            if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto) {
10367              return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto)other);
10368            } else {
10369              super.mergeFrom(other);
10370              return this;
10371            }
10372          }
10373          
10374          public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto other) {
10375            if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto.getDefaultInstance()) return this;
10376            if (other.hasLastSegmentTxId()) {
10377              setLastSegmentTxId(other.getLastSegmentTxId());
10378            }
10379            this.mergeUnknownFields(other.getUnknownFields());
10380            return this;
10381          }
10382          
10383          public final boolean isInitialized() {
10384            return true;
10385          }
10386          
10387          public Builder mergeFrom(
10388              com.google.protobuf.CodedInputStream input,
10389              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
10390              throws java.io.IOException {
10391            com.google.protobuf.UnknownFieldSet.Builder unknownFields =
10392              com.google.protobuf.UnknownFieldSet.newBuilder(
10393                this.getUnknownFields());
10394            while (true) {
10395              int tag = input.readTag();
10396              switch (tag) {
10397                case 0:
10398                  this.setUnknownFields(unknownFields.build());
10399                  onChanged();
10400                  return this;
10401                default: {
10402                  if (!parseUnknownField(input, unknownFields,
10403                                         extensionRegistry, tag)) {
10404                    this.setUnknownFields(unknownFields.build());
10405                    onChanged();
10406                    return this;
10407                  }
10408                  break;
10409                }
10410                case 8: {
10411                  bitField0_ |= 0x00000001;
10412                  lastSegmentTxId_ = input.readUInt64();
10413                  break;
10414                }
10415              }
10416            }
10417          }
10418          
10419          private int bitField0_;
10420          
10421          // optional uint64 lastSegmentTxId = 1;
10422          private long lastSegmentTxId_ ;
10423          public boolean hasLastSegmentTxId() {
10424            return ((bitField0_ & 0x00000001) == 0x00000001);
10425          }
10426          public long getLastSegmentTxId() {
10427            return lastSegmentTxId_;
10428          }
10429          public Builder setLastSegmentTxId(long value) {
10430            bitField0_ |= 0x00000001;
10431            lastSegmentTxId_ = value;
10432            onChanged();
10433            return this;
10434          }
10435          public Builder clearLastSegmentTxId() {
10436            bitField0_ = (bitField0_ & ~0x00000001);
10437            lastSegmentTxId_ = 0L;
10438            onChanged();
10439            return this;
10440          }
10441          
10442          // @@protoc_insertion_point(builder_scope:hadoop.hdfs.NewEpochResponseProto)
10443        }
10444        
10445        static {
10446          defaultInstance = new NewEpochResponseProto(true);
10447          defaultInstance.initFields();
10448        }
10449        
10450        // @@protoc_insertion_point(class_scope:hadoop.hdfs.NewEpochResponseProto)
10451      }
10452      
10453      public interface GetEditLogManifestRequestProtoOrBuilder
10454          extends com.google.protobuf.MessageOrBuilder {
10455        
10456        // required .hadoop.hdfs.JournalIdProto jid = 1;
10457        boolean hasJid();
10458        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid();
10459        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder();
10460        
10461        // required uint64 sinceTxId = 2;
10462        boolean hasSinceTxId();
10463        long getSinceTxId();
10464      }
10465      public static final class GetEditLogManifestRequestProto extends
10466          com.google.protobuf.GeneratedMessage
10467          implements GetEditLogManifestRequestProtoOrBuilder {
10468        // Use GetEditLogManifestRequestProto.newBuilder() to construct.
10469        private GetEditLogManifestRequestProto(Builder builder) {
10470          super(builder);
10471        }
10472        private GetEditLogManifestRequestProto(boolean noInit) {}
10473        
10474        private static final GetEditLogManifestRequestProto defaultInstance;
10475        public static GetEditLogManifestRequestProto getDefaultInstance() {
10476          return defaultInstance;
10477        }
10478        
10479        public GetEditLogManifestRequestProto getDefaultInstanceForType() {
10480          return defaultInstance;
10481        }
10482        
10483        public static final com.google.protobuf.Descriptors.Descriptor
10484            getDescriptor() {
10485          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_GetEditLogManifestRequestProto_descriptor;
10486        }
10487        
10488        protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
10489            internalGetFieldAccessorTable() {
10490          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_GetEditLogManifestRequestProto_fieldAccessorTable;
10491        }
10492        
10493        private int bitField0_;
10494        // required .hadoop.hdfs.JournalIdProto jid = 1;
10495        public static final int JID_FIELD_NUMBER = 1;
10496        private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto jid_;
10497        public boolean hasJid() {
10498          return ((bitField0_ & 0x00000001) == 0x00000001);
10499        }
10500        public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid() {
10501          return jid_;
10502        }
10503        public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder() {
10504          return jid_;
10505        }
10506        
10507        // required uint64 sinceTxId = 2;
10508        public static final int SINCETXID_FIELD_NUMBER = 2;
10509        private long sinceTxId_;
10510        public boolean hasSinceTxId() {
10511          return ((bitField0_ & 0x00000002) == 0x00000002);
10512        }
10513        public long getSinceTxId() {
10514          return sinceTxId_;
10515        }
10516        
10517        private void initFields() {
10518          jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
10519          sinceTxId_ = 0L;
10520        }
10521        private byte memoizedIsInitialized = -1;
10522        public final boolean isInitialized() {
10523          byte isInitialized = memoizedIsInitialized;
10524          if (isInitialized != -1) return isInitialized == 1;
10525          
10526          if (!hasJid()) {
10527            memoizedIsInitialized = 0;
10528            return false;
10529          }
10530          if (!hasSinceTxId()) {
10531            memoizedIsInitialized = 0;
10532            return false;
10533          }
10534          if (!getJid().isInitialized()) {
10535            memoizedIsInitialized = 0;
10536            return false;
10537          }
10538          memoizedIsInitialized = 1;
10539          return true;
10540        }
10541        
10542        public void writeTo(com.google.protobuf.CodedOutputStream output)
10543                            throws java.io.IOException {
10544          getSerializedSize();
10545          if (((bitField0_ & 0x00000001) == 0x00000001)) {
10546            output.writeMessage(1, jid_);
10547          }
10548          if (((bitField0_ & 0x00000002) == 0x00000002)) {
10549            output.writeUInt64(2, sinceTxId_);
10550          }
10551          getUnknownFields().writeTo(output);
10552        }
10553        
10554        private int memoizedSerializedSize = -1;
10555        public int getSerializedSize() {
10556          int size = memoizedSerializedSize;
10557          if (size != -1) return size;
10558        
10559          size = 0;
10560          if (((bitField0_ & 0x00000001) == 0x00000001)) {
10561            size += com.google.protobuf.CodedOutputStream
10562              .computeMessageSize(1, jid_);
10563          }
10564          if (((bitField0_ & 0x00000002) == 0x00000002)) {
10565            size += com.google.protobuf.CodedOutputStream
10566              .computeUInt64Size(2, sinceTxId_);
10567          }
10568          size += getUnknownFields().getSerializedSize();
10569          memoizedSerializedSize = size;
10570          return size;
10571        }
10572        
10573        private static final long serialVersionUID = 0L;
10574        @java.lang.Override
10575        protected java.lang.Object writeReplace()
10576            throws java.io.ObjectStreamException {
10577          return super.writeReplace();
10578        }
10579        
10580        @java.lang.Override
10581        public boolean equals(final java.lang.Object obj) {
10582          if (obj == this) {
10583           return true;
10584          }
10585          if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto)) {
10586            return super.equals(obj);
10587          }
10588          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto) obj;
10589          
10590          boolean result = true;
10591          result = result && (hasJid() == other.hasJid());
10592          if (hasJid()) {
10593            result = result && getJid()
10594                .equals(other.getJid());
10595          }
10596          result = result && (hasSinceTxId() == other.hasSinceTxId());
10597          if (hasSinceTxId()) {
10598            result = result && (getSinceTxId()
10599                == other.getSinceTxId());
10600          }
10601          result = result &&
10602              getUnknownFields().equals(other.getUnknownFields());
10603          return result;
10604        }
10605        
10606        @java.lang.Override
10607        public int hashCode() {
10608          int hash = 41;
10609          hash = (19 * hash) + getDescriptorForType().hashCode();
10610          if (hasJid()) {
10611            hash = (37 * hash) + JID_FIELD_NUMBER;
10612            hash = (53 * hash) + getJid().hashCode();
10613          }
10614          if (hasSinceTxId()) {
10615            hash = (37 * hash) + SINCETXID_FIELD_NUMBER;
10616            hash = (53 * hash) + hashLong(getSinceTxId());
10617          }
10618          hash = (29 * hash) + getUnknownFields().hashCode();
10619          return hash;
10620        }
10621        
10622        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto parseFrom(
10623            com.google.protobuf.ByteString data)
10624            throws com.google.protobuf.InvalidProtocolBufferException {
10625          return newBuilder().mergeFrom(data).buildParsed();
10626        }
10627        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto parseFrom(
10628            com.google.protobuf.ByteString data,
10629            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
10630            throws com.google.protobuf.InvalidProtocolBufferException {
10631          return newBuilder().mergeFrom(data, extensionRegistry)
10632                   .buildParsed();
10633        }
10634        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto parseFrom(byte[] data)
10635            throws com.google.protobuf.InvalidProtocolBufferException {
10636          return newBuilder().mergeFrom(data).buildParsed();
10637        }
10638        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto parseFrom(
10639            byte[] data,
10640            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
10641            throws com.google.protobuf.InvalidProtocolBufferException {
10642          return newBuilder().mergeFrom(data, extensionRegistry)
10643                   .buildParsed();
10644        }
10645        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto parseFrom(java.io.InputStream input)
10646            throws java.io.IOException {
10647          return newBuilder().mergeFrom(input).buildParsed();
10648        }
10649        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto parseFrom(
10650            java.io.InputStream input,
10651            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
10652            throws java.io.IOException {
10653          return newBuilder().mergeFrom(input, extensionRegistry)
10654                   .buildParsed();
10655        }
10656        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto parseDelimitedFrom(java.io.InputStream input)
10657            throws java.io.IOException {
10658          Builder builder = newBuilder();
10659          if (builder.mergeDelimitedFrom(input)) {
10660            return builder.buildParsed();
10661          } else {
10662            return null;
10663          }
10664        }
10665        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto parseDelimitedFrom(
10666            java.io.InputStream input,
10667            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
10668            throws java.io.IOException {
10669          Builder builder = newBuilder();
10670          if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
10671            return builder.buildParsed();
10672          } else {
10673            return null;
10674          }
10675        }
10676        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto parseFrom(
10677            com.google.protobuf.CodedInputStream input)
10678            throws java.io.IOException {
10679          return newBuilder().mergeFrom(input).buildParsed();
10680        }
10681        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto parseFrom(
10682            com.google.protobuf.CodedInputStream input,
10683            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
10684            throws java.io.IOException {
10685          return newBuilder().mergeFrom(input, extensionRegistry)
10686                   .buildParsed();
10687        }
10688        
10689        public static Builder newBuilder() { return Builder.create(); }
10690        public Builder newBuilderForType() { return newBuilder(); }
10691        public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto prototype) {
10692          return newBuilder().mergeFrom(prototype);
10693        }
10694        public Builder toBuilder() { return newBuilder(this); }
10695        
10696        @java.lang.Override
10697        protected Builder newBuilderForType(
10698            com.google.protobuf.GeneratedMessage.BuilderParent parent) {
10699          Builder builder = new Builder(parent);
10700          return builder;
10701        }
10702        public static final class Builder extends
10703            com.google.protobuf.GeneratedMessage.Builder<Builder>
10704           implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProtoOrBuilder {
10705          public static final com.google.protobuf.Descriptors.Descriptor
10706              getDescriptor() {
10707            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_GetEditLogManifestRequestProto_descriptor;
10708          }
10709          
10710          protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
10711              internalGetFieldAccessorTable() {
10712            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_GetEditLogManifestRequestProto_fieldAccessorTable;
10713          }
10714          
10715          // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto.newBuilder()
10716          private Builder() {
10717            maybeForceBuilderInitialization();
10718          }
10719          
10720          private Builder(BuilderParent parent) {
10721            super(parent);
10722            maybeForceBuilderInitialization();
10723          }
10724          private void maybeForceBuilderInitialization() {
10725            if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
10726              getJidFieldBuilder();
10727            }
10728          }
10729          private static Builder create() {
10730            return new Builder();
10731          }
10732          
10733          public Builder clear() {
10734            super.clear();
10735            if (jidBuilder_ == null) {
10736              jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
10737            } else {
10738              jidBuilder_.clear();
10739            }
10740            bitField0_ = (bitField0_ & ~0x00000001);
10741            sinceTxId_ = 0L;
10742            bitField0_ = (bitField0_ & ~0x00000002);
10743            return this;
10744          }
10745          
10746          public Builder clone() {
10747            return create().mergeFrom(buildPartial());
10748          }
10749          
10750          public com.google.protobuf.Descriptors.Descriptor
10751              getDescriptorForType() {
10752            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto.getDescriptor();
10753          }
10754          
10755          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto getDefaultInstanceForType() {
10756            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto.getDefaultInstance();
10757          }
10758          
10759          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto build() {
10760            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto result = buildPartial();
10761            if (!result.isInitialized()) {
10762              throw newUninitializedMessageException(result);
10763            }
10764            return result;
10765          }
10766          
10767          private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto buildParsed()
10768              throws com.google.protobuf.InvalidProtocolBufferException {
10769            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto result = buildPartial();
10770            if (!result.isInitialized()) {
10771              throw newUninitializedMessageException(
10772                result).asInvalidProtocolBufferException();
10773            }
10774            return result;
10775          }
10776          
10777          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto buildPartial() {
10778            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto(this);
10779            int from_bitField0_ = bitField0_;
10780            int to_bitField0_ = 0;
10781            if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
10782              to_bitField0_ |= 0x00000001;
10783            }
10784            if (jidBuilder_ == null) {
10785              result.jid_ = jid_;
10786            } else {
10787              result.jid_ = jidBuilder_.build();
10788            }
10789            if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
10790              to_bitField0_ |= 0x00000002;
10791            }
10792            result.sinceTxId_ = sinceTxId_;
10793            result.bitField0_ = to_bitField0_;
10794            onBuilt();
10795            return result;
10796          }
10797          
10798          public Builder mergeFrom(com.google.protobuf.Message other) {
10799            if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto) {
10800              return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto)other);
10801            } else {
10802              super.mergeFrom(other);
10803              return this;
10804            }
10805          }
10806          
10807          public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto other) {
10808            if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto.getDefaultInstance()) return this;
10809            if (other.hasJid()) {
10810              mergeJid(other.getJid());
10811            }
10812            if (other.hasSinceTxId()) {
10813              setSinceTxId(other.getSinceTxId());
10814            }
10815            this.mergeUnknownFields(other.getUnknownFields());
10816            return this;
10817          }
10818          
10819          public final boolean isInitialized() {
10820            if (!hasJid()) {
10821              
10822              return false;
10823            }
10824            if (!hasSinceTxId()) {
10825              
10826              return false;
10827            }
10828            if (!getJid().isInitialized()) {
10829              
10830              return false;
10831            }
10832            return true;
10833          }
10834          
10835          public Builder mergeFrom(
10836              com.google.protobuf.CodedInputStream input,
10837              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
10838              throws java.io.IOException {
10839            com.google.protobuf.UnknownFieldSet.Builder unknownFields =
10840              com.google.protobuf.UnknownFieldSet.newBuilder(
10841                this.getUnknownFields());
10842            while (true) {
10843              int tag = input.readTag();
10844              switch (tag) {
10845                case 0:
10846                  this.setUnknownFields(unknownFields.build());
10847                  onChanged();
10848                  return this;
10849                default: {
10850                  if (!parseUnknownField(input, unknownFields,
10851                                         extensionRegistry, tag)) {
10852                    this.setUnknownFields(unknownFields.build());
10853                    onChanged();
10854                    return this;
10855                  }
10856                  break;
10857                }
10858                case 10: {
10859                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder subBuilder = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.newBuilder();
10860                  if (hasJid()) {
10861                    subBuilder.mergeFrom(getJid());
10862                  }
10863                  input.readMessage(subBuilder, extensionRegistry);
10864                  setJid(subBuilder.buildPartial());
10865                  break;
10866                }
10867                case 16: {
10868                  bitField0_ |= 0x00000002;
10869                  sinceTxId_ = input.readUInt64();
10870                  break;
10871                }
10872              }
10873            }
10874          }
10875          
10876          private int bitField0_;
10877          
10878          // required .hadoop.hdfs.JournalIdProto jid = 1;
10879          private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
10880          private com.google.protobuf.SingleFieldBuilder<
10881              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder> jidBuilder_;
10882          public boolean hasJid() {
10883            return ((bitField0_ & 0x00000001) == 0x00000001);
10884          }
10885          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto getJid() {
10886            if (jidBuilder_ == null) {
10887              return jid_;
10888            } else {
10889              return jidBuilder_.getMessage();
10890            }
10891          }
10892          public Builder setJid(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto value) {
10893            if (jidBuilder_ == null) {
10894              if (value == null) {
10895                throw new NullPointerException();
10896              }
10897              jid_ = value;
10898              onChanged();
10899            } else {
10900              jidBuilder_.setMessage(value);
10901            }
10902            bitField0_ |= 0x00000001;
10903            return this;
10904          }
10905          public Builder setJid(
10906              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder builderForValue) {
10907            if (jidBuilder_ == null) {
10908              jid_ = builderForValue.build();
10909              onChanged();
10910            } else {
10911              jidBuilder_.setMessage(builderForValue.build());
10912            }
10913            bitField0_ |= 0x00000001;
10914            return this;
10915          }
10916          public Builder mergeJid(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto value) {
10917            if (jidBuilder_ == null) {
10918              if (((bitField0_ & 0x00000001) == 0x00000001) &&
10919                  jid_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance()) {
10920                jid_ =
10921                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.newBuilder(jid_).mergeFrom(value).buildPartial();
10922              } else {
10923                jid_ = value;
10924              }
10925              onChanged();
10926            } else {
10927              jidBuilder_.mergeFrom(value);
10928            }
10929            bitField0_ |= 0x00000001;
10930            return this;
10931          }
10932          public Builder clearJid() {
10933            if (jidBuilder_ == null) {
10934              jid_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.getDefaultInstance();
10935              onChanged();
10936            } else {
10937              jidBuilder_.clear();
10938            }
10939            bitField0_ = (bitField0_ & ~0x00000001);
10940            return this;
10941          }
10942          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder getJidBuilder() {
10943            bitField0_ |= 0x00000001;
10944            onChanged();
10945            return getJidFieldBuilder().getBuilder();
10946          }
10947          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder getJidOrBuilder() {
10948            if (jidBuilder_ != null) {
10949              return jidBuilder_.getMessageOrBuilder();
10950            } else {
10951              return jid_;
10952            }
10953          }
10954          private com.google.protobuf.SingleFieldBuilder<
10955              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder> 
10956              getJidFieldBuilder() {
10957            if (jidBuilder_ == null) {
10958              jidBuilder_ = new com.google.protobuf.SingleFieldBuilder<
10959                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProtoOrBuilder>(
10960                      jid_,
10961                      getParentForChildren(),
10962                      isClean());
10963              jid_ = null;
10964            }
10965            return jidBuilder_;
10966          }
10967          
10968          // required uint64 sinceTxId = 2;
10969          private long sinceTxId_ ;
10970          public boolean hasSinceTxId() {
10971            return ((bitField0_ & 0x00000002) == 0x00000002);
10972          }
10973          public long getSinceTxId() {
10974            return sinceTxId_;
10975          }
10976          public Builder setSinceTxId(long value) {
10977            bitField0_ |= 0x00000002;
10978            sinceTxId_ = value;
10979            onChanged();
10980            return this;
10981          }
10982          public Builder clearSinceTxId() {
10983            bitField0_ = (bitField0_ & ~0x00000002);
10984            sinceTxId_ = 0L;
10985            onChanged();
10986            return this;
10987          }
10988          
10989          // @@protoc_insertion_point(builder_scope:hadoop.hdfs.GetEditLogManifestRequestProto)
10990        }
10991        
10992        static {
10993          defaultInstance = new GetEditLogManifestRequestProto(true);
10994          defaultInstance.initFields();
10995        }
10996        
10997        // @@protoc_insertion_point(class_scope:hadoop.hdfs.GetEditLogManifestRequestProto)
10998      }
10999      
11000      public interface GetEditLogManifestResponseProtoOrBuilder
11001          extends com.google.protobuf.MessageOrBuilder {
11002        
11003        // required .hadoop.hdfs.RemoteEditLogManifestProto manifest = 1;
11004        boolean hasManifest();
11005        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto getManifest();
11006        org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProtoOrBuilder getManifestOrBuilder();
11007        
11008        // required uint32 httpPort = 2;
11009        boolean hasHttpPort();
11010        int getHttpPort();
11011      }
11012      public static final class GetEditLogManifestResponseProto extends
11013          com.google.protobuf.GeneratedMessage
11014          implements GetEditLogManifestResponseProtoOrBuilder {
11015        // Use GetEditLogManifestResponseProto.newBuilder() to construct.
11016        private GetEditLogManifestResponseProto(Builder builder) {
11017          super(builder);
11018        }
11019        private GetEditLogManifestResponseProto(boolean noInit) {}
11020        
11021        private static final GetEditLogManifestResponseProto defaultInstance;
11022        public static GetEditLogManifestResponseProto getDefaultInstance() {
11023          return defaultInstance;
11024        }
11025        
11026        public GetEditLogManifestResponseProto getDefaultInstanceForType() {
11027          return defaultInstance;
11028        }
11029        
11030        public static final com.google.protobuf.Descriptors.Descriptor
11031            getDescriptor() {
11032          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_GetEditLogManifestResponseProto_descriptor;
11033        }
11034        
11035        protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
11036            internalGetFieldAccessorTable() {
11037          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_GetEditLogManifestResponseProto_fieldAccessorTable;
11038        }
11039        
11040        private int bitField0_;
11041        // required .hadoop.hdfs.RemoteEditLogManifestProto manifest = 1;
11042        public static final int MANIFEST_FIELD_NUMBER = 1;
11043        private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto manifest_;
11044        public boolean hasManifest() {
11045          return ((bitField0_ & 0x00000001) == 0x00000001);
11046        }
11047        public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto getManifest() {
11048          return manifest_;
11049        }
11050        public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProtoOrBuilder getManifestOrBuilder() {
11051          return manifest_;
11052        }
11053        
11054        // required uint32 httpPort = 2;
11055        public static final int HTTPPORT_FIELD_NUMBER = 2;
11056        private int httpPort_;
11057        public boolean hasHttpPort() {
11058          return ((bitField0_ & 0x00000002) == 0x00000002);
11059        }
11060        public int getHttpPort() {
11061          return httpPort_;
11062        }
11063        
11064        private void initFields() {
11065          manifest_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto.getDefaultInstance();
11066          httpPort_ = 0;
11067        }
11068        private byte memoizedIsInitialized = -1;
11069        public final boolean isInitialized() {
11070          byte isInitialized = memoizedIsInitialized;
11071          if (isInitialized != -1) return isInitialized == 1;
11072          
11073          if (!hasManifest()) {
11074            memoizedIsInitialized = 0;
11075            return false;
11076          }
11077          if (!hasHttpPort()) {
11078            memoizedIsInitialized = 0;
11079            return false;
11080          }
11081          if (!getManifest().isInitialized()) {
11082            memoizedIsInitialized = 0;
11083            return false;
11084          }
11085          memoizedIsInitialized = 1;
11086          return true;
11087        }
11088        
11089        public void writeTo(com.google.protobuf.CodedOutputStream output)
11090                            throws java.io.IOException {
11091          getSerializedSize();
11092          if (((bitField0_ & 0x00000001) == 0x00000001)) {
11093            output.writeMessage(1, manifest_);
11094          }
11095          if (((bitField0_ & 0x00000002) == 0x00000002)) {
11096            output.writeUInt32(2, httpPort_);
11097          }
11098          getUnknownFields().writeTo(output);
11099        }
11100        
11101        private int memoizedSerializedSize = -1;
11102        public int getSerializedSize() {
11103          int size = memoizedSerializedSize;
11104          if (size != -1) return size;
11105        
11106          size = 0;
11107          if (((bitField0_ & 0x00000001) == 0x00000001)) {
11108            size += com.google.protobuf.CodedOutputStream
11109              .computeMessageSize(1, manifest_);
11110          }
11111          if (((bitField0_ & 0x00000002) == 0x00000002)) {
11112            size += com.google.protobuf.CodedOutputStream
11113              .computeUInt32Size(2, httpPort_);
11114          }
11115          size += getUnknownFields().getSerializedSize();
11116          memoizedSerializedSize = size;
11117          return size;
11118        }
11119        
11120        private static final long serialVersionUID = 0L;
11121        @java.lang.Override
11122        protected java.lang.Object writeReplace()
11123            throws java.io.ObjectStreamException {
11124          return super.writeReplace();
11125        }
11126        
11127        @java.lang.Override
11128        public boolean equals(final java.lang.Object obj) {
11129          if (obj == this) {
11130           return true;
11131          }
11132          if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto)) {
11133            return super.equals(obj);
11134          }
11135          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto) obj;
11136          
11137          boolean result = true;
11138          result = result && (hasManifest() == other.hasManifest());
11139          if (hasManifest()) {
11140            result = result && getManifest()
11141                .equals(other.getManifest());
11142          }
11143          result = result && (hasHttpPort() == other.hasHttpPort());
11144          if (hasHttpPort()) {
11145            result = result && (getHttpPort()
11146                == other.getHttpPort());
11147          }
11148          result = result &&
11149              getUnknownFields().equals(other.getUnknownFields());
11150          return result;
11151        }
11152        
11153        @java.lang.Override
11154        public int hashCode() {
11155          int hash = 41;
11156          hash = (19 * hash) + getDescriptorForType().hashCode();
11157          if (hasManifest()) {
11158            hash = (37 * hash) + MANIFEST_FIELD_NUMBER;
11159            hash = (53 * hash) + getManifest().hashCode();
11160          }
11161          if (hasHttpPort()) {
11162            hash = (37 * hash) + HTTPPORT_FIELD_NUMBER;
11163            hash = (53 * hash) + getHttpPort();
11164          }
11165          hash = (29 * hash) + getUnknownFields().hashCode();
11166          return hash;
11167        }
11168        
11169        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto parseFrom(
11170            com.google.protobuf.ByteString data)
11171            throws com.google.protobuf.InvalidProtocolBufferException {
11172          return newBuilder().mergeFrom(data).buildParsed();
11173        }
11174        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto parseFrom(
11175            com.google.protobuf.ByteString data,
11176            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
11177            throws com.google.protobuf.InvalidProtocolBufferException {
11178          return newBuilder().mergeFrom(data, extensionRegistry)
11179                   .buildParsed();
11180        }
11181        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto parseFrom(byte[] data)
11182            throws com.google.protobuf.InvalidProtocolBufferException {
11183          return newBuilder().mergeFrom(data).buildParsed();
11184        }
11185        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto parseFrom(
11186            byte[] data,
11187            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
11188            throws com.google.protobuf.InvalidProtocolBufferException {
11189          return newBuilder().mergeFrom(data, extensionRegistry)
11190                   .buildParsed();
11191        }
11192        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto parseFrom(java.io.InputStream input)
11193            throws java.io.IOException {
11194          return newBuilder().mergeFrom(input).buildParsed();
11195        }
11196        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto parseFrom(
11197            java.io.InputStream input,
11198            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
11199            throws java.io.IOException {
11200          return newBuilder().mergeFrom(input, extensionRegistry)
11201                   .buildParsed();
11202        }
11203        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto parseDelimitedFrom(java.io.InputStream input)
11204            throws java.io.IOException {
11205          Builder builder = newBuilder();
11206          if (builder.mergeDelimitedFrom(input)) {
11207            return builder.buildParsed();
11208          } else {
11209            return null;
11210          }
11211        }
11212        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto parseDelimitedFrom(
11213            java.io.InputStream input,
11214            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
11215            throws java.io.IOException {
11216          Builder builder = newBuilder();
11217          if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
11218            return builder.buildParsed();
11219          } else {
11220            return null;
11221          }
11222        }
11223        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto parseFrom(
11224            com.google.protobuf.CodedInputStream input)
11225            throws java.io.IOException {
11226          return newBuilder().mergeFrom(input).buildParsed();
11227        }
11228        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto parseFrom(
11229            com.google.protobuf.CodedInputStream input,
11230            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
11231            throws java.io.IOException {
11232          return newBuilder().mergeFrom(input, extensionRegistry)
11233                   .buildParsed();
11234        }
11235        
11236        public static Builder newBuilder() { return Builder.create(); }
11237        public Builder newBuilderForType() { return newBuilder(); }
11238        public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto prototype) {
11239          return newBuilder().mergeFrom(prototype);
11240        }
11241        public Builder toBuilder() { return newBuilder(this); }
11242        
11243        @java.lang.Override
11244        protected Builder newBuilderForType(
11245            com.google.protobuf.GeneratedMessage.BuilderParent parent) {
11246          Builder builder = new Builder(parent);
11247          return builder;
11248        }
11249        public static final class Builder extends
11250            com.google.protobuf.GeneratedMessage.Builder<Builder>
11251           implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProtoOrBuilder {
11252          public static final com.google.protobuf.Descriptors.Descriptor
11253              getDescriptor() {
11254            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_GetEditLogManifestResponseProto_descriptor;
11255          }
11256          
11257          protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
11258              internalGetFieldAccessorTable() {
11259            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_GetEditLogManifestResponseProto_fieldAccessorTable;
11260          }
11261          
11262          // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto.newBuilder()
11263          private Builder() {
11264            maybeForceBuilderInitialization();
11265          }
11266          
11267          private Builder(BuilderParent parent) {
11268            super(parent);
11269            maybeForceBuilderInitialization();
11270          }
11271          private void maybeForceBuilderInitialization() {
11272            if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
11273              getManifestFieldBuilder();
11274            }
11275          }
11276          private static Builder create() {
11277            return new Builder();
11278          }
11279          
11280          public Builder clear() {
11281            super.clear();
11282            if (manifestBuilder_ == null) {
11283              manifest_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto.getDefaultInstance();
11284            } else {
11285              manifestBuilder_.clear();
11286            }
11287            bitField0_ = (bitField0_ & ~0x00000001);
11288            httpPort_ = 0;
11289            bitField0_ = (bitField0_ & ~0x00000002);
11290            return this;
11291          }
11292          
11293          public Builder clone() {
11294            return create().mergeFrom(buildPartial());
11295          }
11296          
11297          public com.google.protobuf.Descriptors.Descriptor
11298              getDescriptorForType() {
11299            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto.getDescriptor();
11300          }
11301          
11302          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto getDefaultInstanceForType() {
11303            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto.getDefaultInstance();
11304          }
11305          
11306          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto build() {
11307            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto result = buildPartial();
11308            if (!result.isInitialized()) {
11309              throw newUninitializedMessageException(result);
11310            }
11311            return result;
11312          }
11313          
11314          private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto buildParsed()
11315              throws com.google.protobuf.InvalidProtocolBufferException {
11316            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto result = buildPartial();
11317            if (!result.isInitialized()) {
11318              throw newUninitializedMessageException(
11319                result).asInvalidProtocolBufferException();
11320            }
11321            return result;
11322          }
11323          
11324          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto buildPartial() {
11325            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto(this);
11326            int from_bitField0_ = bitField0_;
11327            int to_bitField0_ = 0;
11328            if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
11329              to_bitField0_ |= 0x00000001;
11330            }
11331            if (manifestBuilder_ == null) {
11332              result.manifest_ = manifest_;
11333            } else {
11334              result.manifest_ = manifestBuilder_.build();
11335            }
11336            if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
11337              to_bitField0_ |= 0x00000002;
11338            }
11339            result.httpPort_ = httpPort_;
11340            result.bitField0_ = to_bitField0_;
11341            onBuilt();
11342            return result;
11343          }
11344          
11345          public Builder mergeFrom(com.google.protobuf.Message other) {
11346            if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto) {
11347              return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto)other);
11348            } else {
11349              super.mergeFrom(other);
11350              return this;
11351            }
11352          }
11353          
11354          public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto other) {
11355            if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto.getDefaultInstance()) return this;
11356            if (other.hasManifest()) {
11357              mergeManifest(other.getManifest());
11358            }
11359            if (other.hasHttpPort()) {
11360              setHttpPort(other.getHttpPort());
11361            }
11362            this.mergeUnknownFields(other.getUnknownFields());
11363            return this;
11364          }
11365          
11366          public final boolean isInitialized() {
11367            if (!hasManifest()) {
11368              
11369              return false;
11370            }
11371            if (!hasHttpPort()) {
11372              
11373              return false;
11374            }
11375            if (!getManifest().isInitialized()) {
11376              
11377              return false;
11378            }
11379            return true;
11380          }
11381          
11382          public Builder mergeFrom(
11383              com.google.protobuf.CodedInputStream input,
11384              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
11385              throws java.io.IOException {
11386            com.google.protobuf.UnknownFieldSet.Builder unknownFields =
11387              com.google.protobuf.UnknownFieldSet.newBuilder(
11388                this.getUnknownFields());
11389            while (true) {
11390              int tag = input.readTag();
11391              switch (tag) {
11392                case 0:
11393                  this.setUnknownFields(unknownFields.build());
11394                  onChanged();
11395                  return this;
11396                default: {
11397                  if (!parseUnknownField(input, unknownFields,
11398                                         extensionRegistry, tag)) {
11399                    this.setUnknownFields(unknownFields.build());
11400                    onChanged();
11401                    return this;
11402                  }
11403                  break;
11404                }
11405                case 10: {
11406                  org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto.Builder subBuilder = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto.newBuilder();
11407                  if (hasManifest()) {
11408                    subBuilder.mergeFrom(getManifest());
11409                  }
11410                  input.readMessage(subBuilder, extensionRegistry);
11411                  setManifest(subBuilder.buildPartial());
11412                  break;
11413                }
11414                case 16: {
11415                  bitField0_ |= 0x00000002;
11416                  httpPort_ = input.readUInt32();
11417                  break;
11418                }
11419              }
11420            }
11421          }
11422          
11423          private int bitField0_;
11424          
11425          // required .hadoop.hdfs.RemoteEditLogManifestProto manifest = 1;
11426          private org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto manifest_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto.getDefaultInstance();
11427          private com.google.protobuf.SingleFieldBuilder<
11428              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProtoOrBuilder> manifestBuilder_;
11429          public boolean hasManifest() {
11430            return ((bitField0_ & 0x00000001) == 0x00000001);
11431          }
11432          public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto getManifest() {
11433            if (manifestBuilder_ == null) {
11434              return manifest_;
11435            } else {
11436              return manifestBuilder_.getMessage();
11437            }
11438          }
11439          public Builder setManifest(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto value) {
11440            if (manifestBuilder_ == null) {
11441              if (value == null) {
11442                throw new NullPointerException();
11443              }
11444              manifest_ = value;
11445              onChanged();
11446            } else {
11447              manifestBuilder_.setMessage(value);
11448            }
11449            bitField0_ |= 0x00000001;
11450            return this;
11451          }
11452          public Builder setManifest(
11453              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto.Builder builderForValue) {
11454            if (manifestBuilder_ == null) {
11455              manifest_ = builderForValue.build();
11456              onChanged();
11457            } else {
11458              manifestBuilder_.setMessage(builderForValue.build());
11459            }
11460            bitField0_ |= 0x00000001;
11461            return this;
11462          }
11463          public Builder mergeManifest(org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto value) {
11464            if (manifestBuilder_ == null) {
11465              if (((bitField0_ & 0x00000001) == 0x00000001) &&
11466                  manifest_ != org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto.getDefaultInstance()) {
11467                manifest_ =
11468                  org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto.newBuilder(manifest_).mergeFrom(value).buildPartial();
11469              } else {
11470                manifest_ = value;
11471              }
11472              onChanged();
11473            } else {
11474              manifestBuilder_.mergeFrom(value);
11475            }
11476            bitField0_ |= 0x00000001;
11477            return this;
11478          }
11479          public Builder clearManifest() {
11480            if (manifestBuilder_ == null) {
11481              manifest_ = org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto.getDefaultInstance();
11482              onChanged();
11483            } else {
11484              manifestBuilder_.clear();
11485            }
11486            bitField0_ = (bitField0_ & ~0x00000001);
11487            return this;
11488          }
11489          public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto.Builder getManifestBuilder() {
11490            bitField0_ |= 0x00000001;
11491            onChanged();
11492            return getManifestFieldBuilder().getBuilder();
11493          }
11494          public org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProtoOrBuilder getManifestOrBuilder() {
11495            if (manifestBuilder_ != null) {
11496              return manifestBuilder_.getMessageOrBuilder();
11497            } else {
11498              return manifest_;
11499            }
11500          }
11501          private com.google.protobuf.SingleFieldBuilder<
11502              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProtoOrBuilder> 
11503              getManifestFieldBuilder() {
11504            if (manifestBuilder_ == null) {
11505              manifestBuilder_ = new com.google.protobuf.SingleFieldBuilder<
11506                  org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProto.Builder, org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RemoteEditLogManifestProtoOrBuilder>(
11507                      manifest_,
11508                      getParentForChildren(),
11509                      isClean());
11510              manifest_ = null;
11511            }
11512            return manifestBuilder_;
11513          }
11514          
11515          // required uint32 httpPort = 2;
11516          private int httpPort_ ;
11517          public boolean hasHttpPort() {
11518            return ((bitField0_ & 0x00000002) == 0x00000002);
11519          }
11520          public int getHttpPort() {
11521            return httpPort_;
11522          }
11523          public Builder setHttpPort(int value) {
11524            bitField0_ |= 0x00000002;
11525            httpPort_ = value;
11526            onChanged();
11527            return this;
11528          }
11529          public Builder clearHttpPort() {
11530            bitField0_ = (bitField0_ & ~0x00000002);
11531            httpPort_ = 0;
11532            onChanged();
11533            return this;
11534          }
11535          
11536          // @@protoc_insertion_point(builder_scope:hadoop.hdfs.GetEditLogManifestResponseProto)
11537        }
11538        
11539        static {
11540          defaultInstance = new GetEditLogManifestResponseProto(true);
11541          defaultInstance.initFields();
11542        }
11543        
11544        // @@protoc_insertion_point(class_scope:hadoop.hdfs.GetEditLogManifestResponseProto)
11545      }
11546      
11547      public interface PrepareRecoveryRequestProtoOrBuilder
11548          extends com.google.protobuf.MessageOrBuilder {
11549        
11550        // required .hadoop.hdfs.RequestInfoProto reqInfo = 1;
11551        boolean hasReqInfo();
11552        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo();
11553        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder();
11554        
11555        // required uint64 segmentTxId = 2;
11556        boolean hasSegmentTxId();
11557        long getSegmentTxId();
11558      }
11559      public static final class PrepareRecoveryRequestProto extends
11560          com.google.protobuf.GeneratedMessage
11561          implements PrepareRecoveryRequestProtoOrBuilder {
11562        // Use PrepareRecoveryRequestProto.newBuilder() to construct.
11563        private PrepareRecoveryRequestProto(Builder builder) {
11564          super(builder);
11565        }
11566        private PrepareRecoveryRequestProto(boolean noInit) {}
11567        
11568        private static final PrepareRecoveryRequestProto defaultInstance;
11569        public static PrepareRecoveryRequestProto getDefaultInstance() {
11570          return defaultInstance;
11571        }
11572        
11573        public PrepareRecoveryRequestProto getDefaultInstanceForType() {
11574          return defaultInstance;
11575        }
11576        
11577        public static final com.google.protobuf.Descriptors.Descriptor
11578            getDescriptor() {
11579          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_PrepareRecoveryRequestProto_descriptor;
11580        }
11581        
11582        protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
11583            internalGetFieldAccessorTable() {
11584          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_PrepareRecoveryRequestProto_fieldAccessorTable;
11585        }
11586        
11587        private int bitField0_;
11588        // required .hadoop.hdfs.RequestInfoProto reqInfo = 1;
11589        public static final int REQINFO_FIELD_NUMBER = 1;
11590        private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto reqInfo_;
11591        public boolean hasReqInfo() {
11592          return ((bitField0_ & 0x00000001) == 0x00000001);
11593        }
11594        public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo() {
11595          return reqInfo_;
11596        }
11597        public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder() {
11598          return reqInfo_;
11599        }
11600        
11601        // required uint64 segmentTxId = 2;
11602        public static final int SEGMENTTXID_FIELD_NUMBER = 2;
11603        private long segmentTxId_;
11604        public boolean hasSegmentTxId() {
11605          return ((bitField0_ & 0x00000002) == 0x00000002);
11606        }
11607        public long getSegmentTxId() {
11608          return segmentTxId_;
11609        }
11610        
11611        private void initFields() {
11612          reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
11613          segmentTxId_ = 0L;
11614        }
11615        private byte memoizedIsInitialized = -1;
11616        public final boolean isInitialized() {
11617          byte isInitialized = memoizedIsInitialized;
11618          if (isInitialized != -1) return isInitialized == 1;
11619          
11620          if (!hasReqInfo()) {
11621            memoizedIsInitialized = 0;
11622            return false;
11623          }
11624          if (!hasSegmentTxId()) {
11625            memoizedIsInitialized = 0;
11626            return false;
11627          }
11628          if (!getReqInfo().isInitialized()) {
11629            memoizedIsInitialized = 0;
11630            return false;
11631          }
11632          memoizedIsInitialized = 1;
11633          return true;
11634        }
11635        
11636        public void writeTo(com.google.protobuf.CodedOutputStream output)
11637                            throws java.io.IOException {
11638          getSerializedSize();
11639          if (((bitField0_ & 0x00000001) == 0x00000001)) {
11640            output.writeMessage(1, reqInfo_);
11641          }
11642          if (((bitField0_ & 0x00000002) == 0x00000002)) {
11643            output.writeUInt64(2, segmentTxId_);
11644          }
11645          getUnknownFields().writeTo(output);
11646        }
11647        
11648        private int memoizedSerializedSize = -1;
11649        public int getSerializedSize() {
11650          int size = memoizedSerializedSize;
11651          if (size != -1) return size;
11652        
11653          size = 0;
11654          if (((bitField0_ & 0x00000001) == 0x00000001)) {
11655            size += com.google.protobuf.CodedOutputStream
11656              .computeMessageSize(1, reqInfo_);
11657          }
11658          if (((bitField0_ & 0x00000002) == 0x00000002)) {
11659            size += com.google.protobuf.CodedOutputStream
11660              .computeUInt64Size(2, segmentTxId_);
11661          }
11662          size += getUnknownFields().getSerializedSize();
11663          memoizedSerializedSize = size;
11664          return size;
11665        }
11666        
11667        private static final long serialVersionUID = 0L;
11668        @java.lang.Override
11669        protected java.lang.Object writeReplace()
11670            throws java.io.ObjectStreamException {
11671          return super.writeReplace();
11672        }
11673        
11674        @java.lang.Override
11675        public boolean equals(final java.lang.Object obj) {
11676          if (obj == this) {
11677           return true;
11678          }
11679          if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto)) {
11680            return super.equals(obj);
11681          }
11682          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto) obj;
11683          
11684          boolean result = true;
11685          result = result && (hasReqInfo() == other.hasReqInfo());
11686          if (hasReqInfo()) {
11687            result = result && getReqInfo()
11688                .equals(other.getReqInfo());
11689          }
11690          result = result && (hasSegmentTxId() == other.hasSegmentTxId());
11691          if (hasSegmentTxId()) {
11692            result = result && (getSegmentTxId()
11693                == other.getSegmentTxId());
11694          }
11695          result = result &&
11696              getUnknownFields().equals(other.getUnknownFields());
11697          return result;
11698        }
11699        
11700        @java.lang.Override
11701        public int hashCode() {
11702          int hash = 41;
11703          hash = (19 * hash) + getDescriptorForType().hashCode();
11704          if (hasReqInfo()) {
11705            hash = (37 * hash) + REQINFO_FIELD_NUMBER;
11706            hash = (53 * hash) + getReqInfo().hashCode();
11707          }
11708          if (hasSegmentTxId()) {
11709            hash = (37 * hash) + SEGMENTTXID_FIELD_NUMBER;
11710            hash = (53 * hash) + hashLong(getSegmentTxId());
11711          }
11712          hash = (29 * hash) + getUnknownFields().hashCode();
11713          return hash;
11714        }
11715        
11716        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto parseFrom(
11717            com.google.protobuf.ByteString data)
11718            throws com.google.protobuf.InvalidProtocolBufferException {
11719          return newBuilder().mergeFrom(data).buildParsed();
11720        }
11721        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto parseFrom(
11722            com.google.protobuf.ByteString data,
11723            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
11724            throws com.google.protobuf.InvalidProtocolBufferException {
11725          return newBuilder().mergeFrom(data, extensionRegistry)
11726                   .buildParsed();
11727        }
11728        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto parseFrom(byte[] data)
11729            throws com.google.protobuf.InvalidProtocolBufferException {
11730          return newBuilder().mergeFrom(data).buildParsed();
11731        }
11732        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto parseFrom(
11733            byte[] data,
11734            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
11735            throws com.google.protobuf.InvalidProtocolBufferException {
11736          return newBuilder().mergeFrom(data, extensionRegistry)
11737                   .buildParsed();
11738        }
11739        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto parseFrom(java.io.InputStream input)
11740            throws java.io.IOException {
11741          return newBuilder().mergeFrom(input).buildParsed();
11742        }
11743        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto parseFrom(
11744            java.io.InputStream input,
11745            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
11746            throws java.io.IOException {
11747          return newBuilder().mergeFrom(input, extensionRegistry)
11748                   .buildParsed();
11749        }
11750        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto parseDelimitedFrom(java.io.InputStream input)
11751            throws java.io.IOException {
11752          Builder builder = newBuilder();
11753          if (builder.mergeDelimitedFrom(input)) {
11754            return builder.buildParsed();
11755          } else {
11756            return null;
11757          }
11758        }
11759        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto parseDelimitedFrom(
11760            java.io.InputStream input,
11761            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
11762            throws java.io.IOException {
11763          Builder builder = newBuilder();
11764          if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
11765            return builder.buildParsed();
11766          } else {
11767            return null;
11768          }
11769        }
11770        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto parseFrom(
11771            com.google.protobuf.CodedInputStream input)
11772            throws java.io.IOException {
11773          return newBuilder().mergeFrom(input).buildParsed();
11774        }
11775        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto parseFrom(
11776            com.google.protobuf.CodedInputStream input,
11777            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
11778            throws java.io.IOException {
11779          return newBuilder().mergeFrom(input, extensionRegistry)
11780                   .buildParsed();
11781        }
11782        
11783        public static Builder newBuilder() { return Builder.create(); }
11784        public Builder newBuilderForType() { return newBuilder(); }
11785        public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto prototype) {
11786          return newBuilder().mergeFrom(prototype);
11787        }
11788        public Builder toBuilder() { return newBuilder(this); }
11789        
11790        @java.lang.Override
11791        protected Builder newBuilderForType(
11792            com.google.protobuf.GeneratedMessage.BuilderParent parent) {
11793          Builder builder = new Builder(parent);
11794          return builder;
11795        }
11796        public static final class Builder extends
11797            com.google.protobuf.GeneratedMessage.Builder<Builder>
11798           implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProtoOrBuilder {
11799          public static final com.google.protobuf.Descriptors.Descriptor
11800              getDescriptor() {
11801            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_PrepareRecoveryRequestProto_descriptor;
11802          }
11803          
11804          protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
11805              internalGetFieldAccessorTable() {
11806            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_PrepareRecoveryRequestProto_fieldAccessorTable;
11807          }
11808          
11809          // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto.newBuilder()
11810          private Builder() {
11811            maybeForceBuilderInitialization();
11812          }
11813          
11814          private Builder(BuilderParent parent) {
11815            super(parent);
11816            maybeForceBuilderInitialization();
11817          }
11818          private void maybeForceBuilderInitialization() {
11819            if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
11820              getReqInfoFieldBuilder();
11821            }
11822          }
11823          private static Builder create() {
11824            return new Builder();
11825          }
11826          
11827          public Builder clear() {
11828            super.clear();
11829            if (reqInfoBuilder_ == null) {
11830              reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
11831            } else {
11832              reqInfoBuilder_.clear();
11833            }
11834            bitField0_ = (bitField0_ & ~0x00000001);
11835            segmentTxId_ = 0L;
11836            bitField0_ = (bitField0_ & ~0x00000002);
11837            return this;
11838          }
11839          
11840          public Builder clone() {
11841            return create().mergeFrom(buildPartial());
11842          }
11843          
11844          public com.google.protobuf.Descriptors.Descriptor
11845              getDescriptorForType() {
11846            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto.getDescriptor();
11847          }
11848          
11849          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto getDefaultInstanceForType() {
11850            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto.getDefaultInstance();
11851          }
11852          
11853          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto build() {
11854            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto result = buildPartial();
11855            if (!result.isInitialized()) {
11856              throw newUninitializedMessageException(result);
11857            }
11858            return result;
11859          }
11860          
11861          private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto buildParsed()
11862              throws com.google.protobuf.InvalidProtocolBufferException {
11863            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto result = buildPartial();
11864            if (!result.isInitialized()) {
11865              throw newUninitializedMessageException(
11866                result).asInvalidProtocolBufferException();
11867            }
11868            return result;
11869          }
11870          
11871          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto buildPartial() {
11872            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto(this);
11873            int from_bitField0_ = bitField0_;
11874            int to_bitField0_ = 0;
11875            if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
11876              to_bitField0_ |= 0x00000001;
11877            }
11878            if (reqInfoBuilder_ == null) {
11879              result.reqInfo_ = reqInfo_;
11880            } else {
11881              result.reqInfo_ = reqInfoBuilder_.build();
11882            }
11883            if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
11884              to_bitField0_ |= 0x00000002;
11885            }
11886            result.segmentTxId_ = segmentTxId_;
11887            result.bitField0_ = to_bitField0_;
11888            onBuilt();
11889            return result;
11890          }
11891          
11892          public Builder mergeFrom(com.google.protobuf.Message other) {
11893            if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto) {
11894              return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto)other);
11895            } else {
11896              super.mergeFrom(other);
11897              return this;
11898            }
11899          }
11900          
11901          public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto other) {
11902            if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto.getDefaultInstance()) return this;
11903            if (other.hasReqInfo()) {
11904              mergeReqInfo(other.getReqInfo());
11905            }
11906            if (other.hasSegmentTxId()) {
11907              setSegmentTxId(other.getSegmentTxId());
11908            }
11909            this.mergeUnknownFields(other.getUnknownFields());
11910            return this;
11911          }
11912          
11913          public final boolean isInitialized() {
11914            if (!hasReqInfo()) {
11915              
11916              return false;
11917            }
11918            if (!hasSegmentTxId()) {
11919              
11920              return false;
11921            }
11922            if (!getReqInfo().isInitialized()) {
11923              
11924              return false;
11925            }
11926            return true;
11927          }
11928          
11929          public Builder mergeFrom(
11930              com.google.protobuf.CodedInputStream input,
11931              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
11932              throws java.io.IOException {
11933            com.google.protobuf.UnknownFieldSet.Builder unknownFields =
11934              com.google.protobuf.UnknownFieldSet.newBuilder(
11935                this.getUnknownFields());
11936            while (true) {
11937              int tag = input.readTag();
11938              switch (tag) {
11939                case 0:
11940                  this.setUnknownFields(unknownFields.build());
11941                  onChanged();
11942                  return this;
11943                default: {
11944                  if (!parseUnknownField(input, unknownFields,
11945                                         extensionRegistry, tag)) {
11946                    this.setUnknownFields(unknownFields.build());
11947                    onChanged();
11948                    return this;
11949                  }
11950                  break;
11951                }
11952                case 10: {
11953                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder subBuilder = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.newBuilder();
11954                  if (hasReqInfo()) {
11955                    subBuilder.mergeFrom(getReqInfo());
11956                  }
11957                  input.readMessage(subBuilder, extensionRegistry);
11958                  setReqInfo(subBuilder.buildPartial());
11959                  break;
11960                }
11961                case 16: {
11962                  bitField0_ |= 0x00000002;
11963                  segmentTxId_ = input.readUInt64();
11964                  break;
11965                }
11966              }
11967            }
11968          }
11969          
11970          private int bitField0_;
11971          
11972          // required .hadoop.hdfs.RequestInfoProto reqInfo = 1;
11973          private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
11974          private com.google.protobuf.SingleFieldBuilder<
11975              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder> reqInfoBuilder_;
11976          public boolean hasReqInfo() {
11977            return ((bitField0_ & 0x00000001) == 0x00000001);
11978          }
11979          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo() {
11980            if (reqInfoBuilder_ == null) {
11981              return reqInfo_;
11982            } else {
11983              return reqInfoBuilder_.getMessage();
11984            }
11985          }
11986          public Builder setReqInfo(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto value) {
11987            if (reqInfoBuilder_ == null) {
11988              if (value == null) {
11989                throw new NullPointerException();
11990              }
11991              reqInfo_ = value;
11992              onChanged();
11993            } else {
11994              reqInfoBuilder_.setMessage(value);
11995            }
11996            bitField0_ |= 0x00000001;
11997            return this;
11998          }
11999          public Builder setReqInfo(
12000              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder builderForValue) {
12001            if (reqInfoBuilder_ == null) {
12002              reqInfo_ = builderForValue.build();
12003              onChanged();
12004            } else {
12005              reqInfoBuilder_.setMessage(builderForValue.build());
12006            }
12007            bitField0_ |= 0x00000001;
12008            return this;
12009          }
12010          public Builder mergeReqInfo(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto value) {
12011            if (reqInfoBuilder_ == null) {
12012              if (((bitField0_ & 0x00000001) == 0x00000001) &&
12013                  reqInfo_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance()) {
12014                reqInfo_ =
12015                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.newBuilder(reqInfo_).mergeFrom(value).buildPartial();
12016              } else {
12017                reqInfo_ = value;
12018              }
12019              onChanged();
12020            } else {
12021              reqInfoBuilder_.mergeFrom(value);
12022            }
12023            bitField0_ |= 0x00000001;
12024            return this;
12025          }
12026          public Builder clearReqInfo() {
12027            if (reqInfoBuilder_ == null) {
12028              reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
12029              onChanged();
12030            } else {
12031              reqInfoBuilder_.clear();
12032            }
12033            bitField0_ = (bitField0_ & ~0x00000001);
12034            return this;
12035          }
12036          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder getReqInfoBuilder() {
12037            bitField0_ |= 0x00000001;
12038            onChanged();
12039            return getReqInfoFieldBuilder().getBuilder();
12040          }
12041          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder() {
12042            if (reqInfoBuilder_ != null) {
12043              return reqInfoBuilder_.getMessageOrBuilder();
12044            } else {
12045              return reqInfo_;
12046            }
12047          }
12048          private com.google.protobuf.SingleFieldBuilder<
12049              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder> 
12050              getReqInfoFieldBuilder() {
12051            if (reqInfoBuilder_ == null) {
12052              reqInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder<
12053                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder>(
12054                      reqInfo_,
12055                      getParentForChildren(),
12056                      isClean());
12057              reqInfo_ = null;
12058            }
12059            return reqInfoBuilder_;
12060          }
12061          
12062          // required uint64 segmentTxId = 2;
12063          private long segmentTxId_ ;
12064          public boolean hasSegmentTxId() {
12065            return ((bitField0_ & 0x00000002) == 0x00000002);
12066          }
12067          public long getSegmentTxId() {
12068            return segmentTxId_;
12069          }
12070          public Builder setSegmentTxId(long value) {
12071            bitField0_ |= 0x00000002;
12072            segmentTxId_ = value;
12073            onChanged();
12074            return this;
12075          }
12076          public Builder clearSegmentTxId() {
12077            bitField0_ = (bitField0_ & ~0x00000002);
12078            segmentTxId_ = 0L;
12079            onChanged();
12080            return this;
12081          }
12082          
12083          // @@protoc_insertion_point(builder_scope:hadoop.hdfs.PrepareRecoveryRequestProto)
12084        }
12085        
12086        static {
12087          defaultInstance = new PrepareRecoveryRequestProto(true);
12088          defaultInstance.initFields();
12089        }
12090        
12091        // @@protoc_insertion_point(class_scope:hadoop.hdfs.PrepareRecoveryRequestProto)
12092      }
12093      
12094      public interface PrepareRecoveryResponseProtoOrBuilder
12095          extends com.google.protobuf.MessageOrBuilder {
12096        
12097        // optional .hadoop.hdfs.SegmentStateProto segmentState = 1;
12098        boolean hasSegmentState();
12099        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto getSegmentState();
12100        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder getSegmentStateOrBuilder();
12101        
12102        // optional uint64 acceptedInEpoch = 2;
12103        boolean hasAcceptedInEpoch();
12104        long getAcceptedInEpoch();
12105        
12106        // required uint64 lastWriterEpoch = 3;
12107        boolean hasLastWriterEpoch();
12108        long getLastWriterEpoch();
12109        
12110        // optional uint64 lastCommittedTxId = 4;
12111        boolean hasLastCommittedTxId();
12112        long getLastCommittedTxId();
12113      }
12114      public static final class PrepareRecoveryResponseProto extends
12115          com.google.protobuf.GeneratedMessage
12116          implements PrepareRecoveryResponseProtoOrBuilder {
12117        // Use PrepareRecoveryResponseProto.newBuilder() to construct.
12118        private PrepareRecoveryResponseProto(Builder builder) {
12119          super(builder);
12120        }
12121        private PrepareRecoveryResponseProto(boolean noInit) {}
12122        
12123        private static final PrepareRecoveryResponseProto defaultInstance;
12124        public static PrepareRecoveryResponseProto getDefaultInstance() {
12125          return defaultInstance;
12126        }
12127        
12128        public PrepareRecoveryResponseProto getDefaultInstanceForType() {
12129          return defaultInstance;
12130        }
12131        
12132        public static final com.google.protobuf.Descriptors.Descriptor
12133            getDescriptor() {
12134          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_PrepareRecoveryResponseProto_descriptor;
12135        }
12136        
12137        protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
12138            internalGetFieldAccessorTable() {
12139          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_PrepareRecoveryResponseProto_fieldAccessorTable;
12140        }
12141        
12142        private int bitField0_;
12143        // optional .hadoop.hdfs.SegmentStateProto segmentState = 1;
12144        public static final int SEGMENTSTATE_FIELD_NUMBER = 1;
12145        private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto segmentState_;
12146        public boolean hasSegmentState() {
12147          return ((bitField0_ & 0x00000001) == 0x00000001);
12148        }
12149        public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto getSegmentState() {
12150          return segmentState_;
12151        }
12152        public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder getSegmentStateOrBuilder() {
12153          return segmentState_;
12154        }
12155        
12156        // optional uint64 acceptedInEpoch = 2;
12157        public static final int ACCEPTEDINEPOCH_FIELD_NUMBER = 2;
12158        private long acceptedInEpoch_;
12159        public boolean hasAcceptedInEpoch() {
12160          return ((bitField0_ & 0x00000002) == 0x00000002);
12161        }
12162        public long getAcceptedInEpoch() {
12163          return acceptedInEpoch_;
12164        }
12165        
12166        // required uint64 lastWriterEpoch = 3;
12167        public static final int LASTWRITEREPOCH_FIELD_NUMBER = 3;
12168        private long lastWriterEpoch_;
12169        public boolean hasLastWriterEpoch() {
12170          return ((bitField0_ & 0x00000004) == 0x00000004);
12171        }
12172        public long getLastWriterEpoch() {
12173          return lastWriterEpoch_;
12174        }
12175        
12176        // optional uint64 lastCommittedTxId = 4;
12177        public static final int LASTCOMMITTEDTXID_FIELD_NUMBER = 4;
12178        private long lastCommittedTxId_;
12179        public boolean hasLastCommittedTxId() {
12180          return ((bitField0_ & 0x00000008) == 0x00000008);
12181        }
12182        public long getLastCommittedTxId() {
12183          return lastCommittedTxId_;
12184        }
12185        
12186        private void initFields() {
12187          segmentState_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance();
12188          acceptedInEpoch_ = 0L;
12189          lastWriterEpoch_ = 0L;
12190          lastCommittedTxId_ = 0L;
12191        }
12192        private byte memoizedIsInitialized = -1;
12193        public final boolean isInitialized() {
12194          byte isInitialized = memoizedIsInitialized;
12195          if (isInitialized != -1) return isInitialized == 1;
12196          
12197          if (!hasLastWriterEpoch()) {
12198            memoizedIsInitialized = 0;
12199            return false;
12200          }
12201          if (hasSegmentState()) {
12202            if (!getSegmentState().isInitialized()) {
12203              memoizedIsInitialized = 0;
12204              return false;
12205            }
12206          }
12207          memoizedIsInitialized = 1;
12208          return true;
12209        }
12210        
12211        public void writeTo(com.google.protobuf.CodedOutputStream output)
12212                            throws java.io.IOException {
12213          getSerializedSize();
12214          if (((bitField0_ & 0x00000001) == 0x00000001)) {
12215            output.writeMessage(1, segmentState_);
12216          }
12217          if (((bitField0_ & 0x00000002) == 0x00000002)) {
12218            output.writeUInt64(2, acceptedInEpoch_);
12219          }
12220          if (((bitField0_ & 0x00000004) == 0x00000004)) {
12221            output.writeUInt64(3, lastWriterEpoch_);
12222          }
12223          if (((bitField0_ & 0x00000008) == 0x00000008)) {
12224            output.writeUInt64(4, lastCommittedTxId_);
12225          }
12226          getUnknownFields().writeTo(output);
12227        }
12228        
12229        private int memoizedSerializedSize = -1;
12230        public int getSerializedSize() {
12231          int size = memoizedSerializedSize;
12232          if (size != -1) return size;
12233        
12234          size = 0;
12235          if (((bitField0_ & 0x00000001) == 0x00000001)) {
12236            size += com.google.protobuf.CodedOutputStream
12237              .computeMessageSize(1, segmentState_);
12238          }
12239          if (((bitField0_ & 0x00000002) == 0x00000002)) {
12240            size += com.google.protobuf.CodedOutputStream
12241              .computeUInt64Size(2, acceptedInEpoch_);
12242          }
12243          if (((bitField0_ & 0x00000004) == 0x00000004)) {
12244            size += com.google.protobuf.CodedOutputStream
12245              .computeUInt64Size(3, lastWriterEpoch_);
12246          }
12247          if (((bitField0_ & 0x00000008) == 0x00000008)) {
12248            size += com.google.protobuf.CodedOutputStream
12249              .computeUInt64Size(4, lastCommittedTxId_);
12250          }
12251          size += getUnknownFields().getSerializedSize();
12252          memoizedSerializedSize = size;
12253          return size;
12254        }
12255        
12256        private static final long serialVersionUID = 0L;
12257        @java.lang.Override
12258        protected java.lang.Object writeReplace()
12259            throws java.io.ObjectStreamException {
12260          return super.writeReplace();
12261        }
12262        
12263        @java.lang.Override
12264        public boolean equals(final java.lang.Object obj) {
12265          if (obj == this) {
12266           return true;
12267          }
12268          if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto)) {
12269            return super.equals(obj);
12270          }
12271          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto) obj;
12272          
12273          boolean result = true;
12274          result = result && (hasSegmentState() == other.hasSegmentState());
12275          if (hasSegmentState()) {
12276            result = result && getSegmentState()
12277                .equals(other.getSegmentState());
12278          }
12279          result = result && (hasAcceptedInEpoch() == other.hasAcceptedInEpoch());
12280          if (hasAcceptedInEpoch()) {
12281            result = result && (getAcceptedInEpoch()
12282                == other.getAcceptedInEpoch());
12283          }
12284          result = result && (hasLastWriterEpoch() == other.hasLastWriterEpoch());
12285          if (hasLastWriterEpoch()) {
12286            result = result && (getLastWriterEpoch()
12287                == other.getLastWriterEpoch());
12288          }
12289          result = result && (hasLastCommittedTxId() == other.hasLastCommittedTxId());
12290          if (hasLastCommittedTxId()) {
12291            result = result && (getLastCommittedTxId()
12292                == other.getLastCommittedTxId());
12293          }
12294          result = result &&
12295              getUnknownFields().equals(other.getUnknownFields());
12296          return result;
12297        }
12298        
12299        @java.lang.Override
12300        public int hashCode() {
12301          int hash = 41;
12302          hash = (19 * hash) + getDescriptorForType().hashCode();
12303          if (hasSegmentState()) {
12304            hash = (37 * hash) + SEGMENTSTATE_FIELD_NUMBER;
12305            hash = (53 * hash) + getSegmentState().hashCode();
12306          }
12307          if (hasAcceptedInEpoch()) {
12308            hash = (37 * hash) + ACCEPTEDINEPOCH_FIELD_NUMBER;
12309            hash = (53 * hash) + hashLong(getAcceptedInEpoch());
12310          }
12311          if (hasLastWriterEpoch()) {
12312            hash = (37 * hash) + LASTWRITEREPOCH_FIELD_NUMBER;
12313            hash = (53 * hash) + hashLong(getLastWriterEpoch());
12314          }
12315          if (hasLastCommittedTxId()) {
12316            hash = (37 * hash) + LASTCOMMITTEDTXID_FIELD_NUMBER;
12317            hash = (53 * hash) + hashLong(getLastCommittedTxId());
12318          }
12319          hash = (29 * hash) + getUnknownFields().hashCode();
12320          return hash;
12321        }
12322        
12323        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto parseFrom(
12324            com.google.protobuf.ByteString data)
12325            throws com.google.protobuf.InvalidProtocolBufferException {
12326          return newBuilder().mergeFrom(data).buildParsed();
12327        }
12328        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto parseFrom(
12329            com.google.protobuf.ByteString data,
12330            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
12331            throws com.google.protobuf.InvalidProtocolBufferException {
12332          return newBuilder().mergeFrom(data, extensionRegistry)
12333                   .buildParsed();
12334        }
12335        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto parseFrom(byte[] data)
12336            throws com.google.protobuf.InvalidProtocolBufferException {
12337          return newBuilder().mergeFrom(data).buildParsed();
12338        }
12339        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto parseFrom(
12340            byte[] data,
12341            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
12342            throws com.google.protobuf.InvalidProtocolBufferException {
12343          return newBuilder().mergeFrom(data, extensionRegistry)
12344                   .buildParsed();
12345        }
12346        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto parseFrom(java.io.InputStream input)
12347            throws java.io.IOException {
12348          return newBuilder().mergeFrom(input).buildParsed();
12349        }
12350        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto parseFrom(
12351            java.io.InputStream input,
12352            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
12353            throws java.io.IOException {
12354          return newBuilder().mergeFrom(input, extensionRegistry)
12355                   .buildParsed();
12356        }
12357        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto parseDelimitedFrom(java.io.InputStream input)
12358            throws java.io.IOException {
12359          Builder builder = newBuilder();
12360          if (builder.mergeDelimitedFrom(input)) {
12361            return builder.buildParsed();
12362          } else {
12363            return null;
12364          }
12365        }
12366        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto parseDelimitedFrom(
12367            java.io.InputStream input,
12368            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
12369            throws java.io.IOException {
12370          Builder builder = newBuilder();
12371          if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
12372            return builder.buildParsed();
12373          } else {
12374            return null;
12375          }
12376        }
12377        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto parseFrom(
12378            com.google.protobuf.CodedInputStream input)
12379            throws java.io.IOException {
12380          return newBuilder().mergeFrom(input).buildParsed();
12381        }
12382        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto parseFrom(
12383            com.google.protobuf.CodedInputStream input,
12384            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
12385            throws java.io.IOException {
12386          return newBuilder().mergeFrom(input, extensionRegistry)
12387                   .buildParsed();
12388        }
12389        
12390        public static Builder newBuilder() { return Builder.create(); }
12391        public Builder newBuilderForType() { return newBuilder(); }
12392        public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto prototype) {
12393          return newBuilder().mergeFrom(prototype);
12394        }
12395        public Builder toBuilder() { return newBuilder(this); }
12396        
12397        @java.lang.Override
12398        protected Builder newBuilderForType(
12399            com.google.protobuf.GeneratedMessage.BuilderParent parent) {
12400          Builder builder = new Builder(parent);
12401          return builder;
12402        }
12403        public static final class Builder extends
12404            com.google.protobuf.GeneratedMessage.Builder<Builder>
12405           implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProtoOrBuilder {
12406          public static final com.google.protobuf.Descriptors.Descriptor
12407              getDescriptor() {
12408            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_PrepareRecoveryResponseProto_descriptor;
12409          }
12410          
12411          protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
12412              internalGetFieldAccessorTable() {
12413            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_PrepareRecoveryResponseProto_fieldAccessorTable;
12414          }
12415          
12416          // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto.newBuilder()
12417          private Builder() {
12418            maybeForceBuilderInitialization();
12419          }
12420          
12421          private Builder(BuilderParent parent) {
12422            super(parent);
12423            maybeForceBuilderInitialization();
12424          }
12425          private void maybeForceBuilderInitialization() {
12426            if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
12427              getSegmentStateFieldBuilder();
12428            }
12429          }
12430          private static Builder create() {
12431            return new Builder();
12432          }
12433          
12434          public Builder clear() {
12435            super.clear();
12436            if (segmentStateBuilder_ == null) {
12437              segmentState_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance();
12438            } else {
12439              segmentStateBuilder_.clear();
12440            }
12441            bitField0_ = (bitField0_ & ~0x00000001);
12442            acceptedInEpoch_ = 0L;
12443            bitField0_ = (bitField0_ & ~0x00000002);
12444            lastWriterEpoch_ = 0L;
12445            bitField0_ = (bitField0_ & ~0x00000004);
12446            lastCommittedTxId_ = 0L;
12447            bitField0_ = (bitField0_ & ~0x00000008);
12448            return this;
12449          }
12450          
12451          public Builder clone() {
12452            return create().mergeFrom(buildPartial());
12453          }
12454          
12455          public com.google.protobuf.Descriptors.Descriptor
12456              getDescriptorForType() {
12457            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto.getDescriptor();
12458          }
12459          
12460          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto getDefaultInstanceForType() {
12461            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto.getDefaultInstance();
12462          }
12463          
12464          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto build() {
12465            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto result = buildPartial();
12466            if (!result.isInitialized()) {
12467              throw newUninitializedMessageException(result);
12468            }
12469            return result;
12470          }
12471          
12472          private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto buildParsed()
12473              throws com.google.protobuf.InvalidProtocolBufferException {
12474            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto result = buildPartial();
12475            if (!result.isInitialized()) {
12476              throw newUninitializedMessageException(
12477                result).asInvalidProtocolBufferException();
12478            }
12479            return result;
12480          }
12481          
12482          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto buildPartial() {
12483            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto(this);
12484            int from_bitField0_ = bitField0_;
12485            int to_bitField0_ = 0;
12486            if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
12487              to_bitField0_ |= 0x00000001;
12488            }
12489            if (segmentStateBuilder_ == null) {
12490              result.segmentState_ = segmentState_;
12491            } else {
12492              result.segmentState_ = segmentStateBuilder_.build();
12493            }
12494            if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
12495              to_bitField0_ |= 0x00000002;
12496            }
12497            result.acceptedInEpoch_ = acceptedInEpoch_;
12498            if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
12499              to_bitField0_ |= 0x00000004;
12500            }
12501            result.lastWriterEpoch_ = lastWriterEpoch_;
12502            if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
12503              to_bitField0_ |= 0x00000008;
12504            }
12505            result.lastCommittedTxId_ = lastCommittedTxId_;
12506            result.bitField0_ = to_bitField0_;
12507            onBuilt();
12508            return result;
12509          }
12510          
12511          public Builder mergeFrom(com.google.protobuf.Message other) {
12512            if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto) {
12513              return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto)other);
12514            } else {
12515              super.mergeFrom(other);
12516              return this;
12517            }
12518          }
12519          
12520          public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto other) {
12521            if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto.getDefaultInstance()) return this;
12522            if (other.hasSegmentState()) {
12523              mergeSegmentState(other.getSegmentState());
12524            }
12525            if (other.hasAcceptedInEpoch()) {
12526              setAcceptedInEpoch(other.getAcceptedInEpoch());
12527            }
12528            if (other.hasLastWriterEpoch()) {
12529              setLastWriterEpoch(other.getLastWriterEpoch());
12530            }
12531            if (other.hasLastCommittedTxId()) {
12532              setLastCommittedTxId(other.getLastCommittedTxId());
12533            }
12534            this.mergeUnknownFields(other.getUnknownFields());
12535            return this;
12536          }
12537          
12538          public final boolean isInitialized() {
12539            if (!hasLastWriterEpoch()) {
12540              
12541              return false;
12542            }
12543            if (hasSegmentState()) {
12544              if (!getSegmentState().isInitialized()) {
12545                
12546                return false;
12547              }
12548            }
12549            return true;
12550          }
12551          
12552          public Builder mergeFrom(
12553              com.google.protobuf.CodedInputStream input,
12554              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
12555              throws java.io.IOException {
12556            com.google.protobuf.UnknownFieldSet.Builder unknownFields =
12557              com.google.protobuf.UnknownFieldSet.newBuilder(
12558                this.getUnknownFields());
12559            while (true) {
12560              int tag = input.readTag();
12561              switch (tag) {
12562                case 0:
12563                  this.setUnknownFields(unknownFields.build());
12564                  onChanged();
12565                  return this;
12566                default: {
12567                  if (!parseUnknownField(input, unknownFields,
12568                                         extensionRegistry, tag)) {
12569                    this.setUnknownFields(unknownFields.build());
12570                    onChanged();
12571                    return this;
12572                  }
12573                  break;
12574                }
12575                case 10: {
12576                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder subBuilder = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.newBuilder();
12577                  if (hasSegmentState()) {
12578                    subBuilder.mergeFrom(getSegmentState());
12579                  }
12580                  input.readMessage(subBuilder, extensionRegistry);
12581                  setSegmentState(subBuilder.buildPartial());
12582                  break;
12583                }
12584                case 16: {
12585                  bitField0_ |= 0x00000002;
12586                  acceptedInEpoch_ = input.readUInt64();
12587                  break;
12588                }
12589                case 24: {
12590                  bitField0_ |= 0x00000004;
12591                  lastWriterEpoch_ = input.readUInt64();
12592                  break;
12593                }
12594                case 32: {
12595                  bitField0_ |= 0x00000008;
12596                  lastCommittedTxId_ = input.readUInt64();
12597                  break;
12598                }
12599              }
12600            }
12601          }
12602          
12603          private int bitField0_;
12604          
12605          // optional .hadoop.hdfs.SegmentStateProto segmentState = 1;
12606          private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto segmentState_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance();
12607          private com.google.protobuf.SingleFieldBuilder<
12608              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder> segmentStateBuilder_;
12609          public boolean hasSegmentState() {
12610            return ((bitField0_ & 0x00000001) == 0x00000001);
12611          }
12612          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto getSegmentState() {
12613            if (segmentStateBuilder_ == null) {
12614              return segmentState_;
12615            } else {
12616              return segmentStateBuilder_.getMessage();
12617            }
12618          }
12619          public Builder setSegmentState(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto value) {
12620            if (segmentStateBuilder_ == null) {
12621              if (value == null) {
12622                throw new NullPointerException();
12623              }
12624              segmentState_ = value;
12625              onChanged();
12626            } else {
12627              segmentStateBuilder_.setMessage(value);
12628            }
12629            bitField0_ |= 0x00000001;
12630            return this;
12631          }
12632          public Builder setSegmentState(
12633              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder builderForValue) {
12634            if (segmentStateBuilder_ == null) {
12635              segmentState_ = builderForValue.build();
12636              onChanged();
12637            } else {
12638              segmentStateBuilder_.setMessage(builderForValue.build());
12639            }
12640            bitField0_ |= 0x00000001;
12641            return this;
12642          }
12643          public Builder mergeSegmentState(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto value) {
12644            if (segmentStateBuilder_ == null) {
12645              if (((bitField0_ & 0x00000001) == 0x00000001) &&
12646                  segmentState_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance()) {
12647                segmentState_ =
12648                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.newBuilder(segmentState_).mergeFrom(value).buildPartial();
12649              } else {
12650                segmentState_ = value;
12651              }
12652              onChanged();
12653            } else {
12654              segmentStateBuilder_.mergeFrom(value);
12655            }
12656            bitField0_ |= 0x00000001;
12657            return this;
12658          }
12659          public Builder clearSegmentState() {
12660            if (segmentStateBuilder_ == null) {
12661              segmentState_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance();
12662              onChanged();
12663            } else {
12664              segmentStateBuilder_.clear();
12665            }
12666            bitField0_ = (bitField0_ & ~0x00000001);
12667            return this;
12668          }
12669          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder getSegmentStateBuilder() {
12670            bitField0_ |= 0x00000001;
12671            onChanged();
12672            return getSegmentStateFieldBuilder().getBuilder();
12673          }
12674          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder getSegmentStateOrBuilder() {
12675            if (segmentStateBuilder_ != null) {
12676              return segmentStateBuilder_.getMessageOrBuilder();
12677            } else {
12678              return segmentState_;
12679            }
12680          }
12681          private com.google.protobuf.SingleFieldBuilder<
12682              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder> 
12683              getSegmentStateFieldBuilder() {
12684            if (segmentStateBuilder_ == null) {
12685              segmentStateBuilder_ = new com.google.protobuf.SingleFieldBuilder<
12686                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder>(
12687                      segmentState_,
12688                      getParentForChildren(),
12689                      isClean());
12690              segmentState_ = null;
12691            }
12692            return segmentStateBuilder_;
12693          }
12694          
12695          // optional uint64 acceptedInEpoch = 2;
12696          private long acceptedInEpoch_ ;
12697          public boolean hasAcceptedInEpoch() {
12698            return ((bitField0_ & 0x00000002) == 0x00000002);
12699          }
12700          public long getAcceptedInEpoch() {
12701            return acceptedInEpoch_;
12702          }
12703          public Builder setAcceptedInEpoch(long value) {
12704            bitField0_ |= 0x00000002;
12705            acceptedInEpoch_ = value;
12706            onChanged();
12707            return this;
12708          }
12709          public Builder clearAcceptedInEpoch() {
12710            bitField0_ = (bitField0_ & ~0x00000002);
12711            acceptedInEpoch_ = 0L;
12712            onChanged();
12713            return this;
12714          }
12715          
12716          // required uint64 lastWriterEpoch = 3;
12717          private long lastWriterEpoch_ ;
12718          public boolean hasLastWriterEpoch() {
12719            return ((bitField0_ & 0x00000004) == 0x00000004);
12720          }
12721          public long getLastWriterEpoch() {
12722            return lastWriterEpoch_;
12723          }
12724          public Builder setLastWriterEpoch(long value) {
12725            bitField0_ |= 0x00000004;
12726            lastWriterEpoch_ = value;
12727            onChanged();
12728            return this;
12729          }
12730          public Builder clearLastWriterEpoch() {
12731            bitField0_ = (bitField0_ & ~0x00000004);
12732            lastWriterEpoch_ = 0L;
12733            onChanged();
12734            return this;
12735          }
12736          
12737          // optional uint64 lastCommittedTxId = 4;
12738          private long lastCommittedTxId_ ;
12739          public boolean hasLastCommittedTxId() {
12740            return ((bitField0_ & 0x00000008) == 0x00000008);
12741          }
12742          public long getLastCommittedTxId() {
12743            return lastCommittedTxId_;
12744          }
12745          public Builder setLastCommittedTxId(long value) {
12746            bitField0_ |= 0x00000008;
12747            lastCommittedTxId_ = value;
12748            onChanged();
12749            return this;
12750          }
12751          public Builder clearLastCommittedTxId() {
12752            bitField0_ = (bitField0_ & ~0x00000008);
12753            lastCommittedTxId_ = 0L;
12754            onChanged();
12755            return this;
12756          }
12757          
12758          // @@protoc_insertion_point(builder_scope:hadoop.hdfs.PrepareRecoveryResponseProto)
12759        }
12760        
12761        static {
12762          defaultInstance = new PrepareRecoveryResponseProto(true);
12763          defaultInstance.initFields();
12764        }
12765        
12766        // @@protoc_insertion_point(class_scope:hadoop.hdfs.PrepareRecoveryResponseProto)
12767      }
12768      
12769      public interface AcceptRecoveryRequestProtoOrBuilder
12770          extends com.google.protobuf.MessageOrBuilder {
12771        
12772        // required .hadoop.hdfs.RequestInfoProto reqInfo = 1;
12773        boolean hasReqInfo();
12774        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo();
12775        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder();
12776        
12777        // required .hadoop.hdfs.SegmentStateProto stateToAccept = 2;
12778        boolean hasStateToAccept();
12779        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto getStateToAccept();
12780        org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder getStateToAcceptOrBuilder();
12781        
12782        // required string fromURL = 3;
12783        boolean hasFromURL();
12784        String getFromURL();
12785      }
12786      public static final class AcceptRecoveryRequestProto extends
12787          com.google.protobuf.GeneratedMessage
12788          implements AcceptRecoveryRequestProtoOrBuilder {
12789        // Use AcceptRecoveryRequestProto.newBuilder() to construct.
12790        private AcceptRecoveryRequestProto(Builder builder) {
12791          super(builder);
12792        }
12793        private AcceptRecoveryRequestProto(boolean noInit) {}
12794        
12795        private static final AcceptRecoveryRequestProto defaultInstance;
12796        public static AcceptRecoveryRequestProto getDefaultInstance() {
12797          return defaultInstance;
12798        }
12799        
12800        public AcceptRecoveryRequestProto getDefaultInstanceForType() {
12801          return defaultInstance;
12802        }
12803        
12804        public static final com.google.protobuf.Descriptors.Descriptor
12805            getDescriptor() {
12806          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_AcceptRecoveryRequestProto_descriptor;
12807        }
12808        
12809        protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
12810            internalGetFieldAccessorTable() {
12811          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_AcceptRecoveryRequestProto_fieldAccessorTable;
12812        }
12813        
12814        private int bitField0_;
12815        // required .hadoop.hdfs.RequestInfoProto reqInfo = 1;
12816        public static final int REQINFO_FIELD_NUMBER = 1;
12817        private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto reqInfo_;
12818        public boolean hasReqInfo() {
12819          return ((bitField0_ & 0x00000001) == 0x00000001);
12820        }
12821        public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo() {
12822          return reqInfo_;
12823        }
12824        public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder() {
12825          return reqInfo_;
12826        }
12827        
12828        // required .hadoop.hdfs.SegmentStateProto stateToAccept = 2;
12829        public static final int STATETOACCEPT_FIELD_NUMBER = 2;
12830        private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto stateToAccept_;
12831        public boolean hasStateToAccept() {
12832          return ((bitField0_ & 0x00000002) == 0x00000002);
12833        }
12834        public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto getStateToAccept() {
12835          return stateToAccept_;
12836        }
12837        public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder getStateToAcceptOrBuilder() {
12838          return stateToAccept_;
12839        }
12840        
12841        // required string fromURL = 3;
12842        public static final int FROMURL_FIELD_NUMBER = 3;
12843        private java.lang.Object fromURL_;
12844        public boolean hasFromURL() {
12845          return ((bitField0_ & 0x00000004) == 0x00000004);
12846        }
12847        public String getFromURL() {
12848          java.lang.Object ref = fromURL_;
12849          if (ref instanceof String) {
12850            return (String) ref;
12851          } else {
12852            com.google.protobuf.ByteString bs = 
12853                (com.google.protobuf.ByteString) ref;
12854            String s = bs.toStringUtf8();
12855            if (com.google.protobuf.Internal.isValidUtf8(bs)) {
12856              fromURL_ = s;
12857            }
12858            return s;
12859          }
12860        }
12861        private com.google.protobuf.ByteString getFromURLBytes() {
12862          java.lang.Object ref = fromURL_;
12863          if (ref instanceof String) {
12864            com.google.protobuf.ByteString b = 
12865                com.google.protobuf.ByteString.copyFromUtf8((String) ref);
12866            fromURL_ = b;
12867            return b;
12868          } else {
12869            return (com.google.protobuf.ByteString) ref;
12870          }
12871        }
12872        
12873        private void initFields() {
12874          reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
12875          stateToAccept_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance();
12876          fromURL_ = "";
12877        }
12878        private byte memoizedIsInitialized = -1;
12879        public final boolean isInitialized() {
12880          byte isInitialized = memoizedIsInitialized;
12881          if (isInitialized != -1) return isInitialized == 1;
12882          
12883          if (!hasReqInfo()) {
12884            memoizedIsInitialized = 0;
12885            return false;
12886          }
12887          if (!hasStateToAccept()) {
12888            memoizedIsInitialized = 0;
12889            return false;
12890          }
12891          if (!hasFromURL()) {
12892            memoizedIsInitialized = 0;
12893            return false;
12894          }
12895          if (!getReqInfo().isInitialized()) {
12896            memoizedIsInitialized = 0;
12897            return false;
12898          }
12899          if (!getStateToAccept().isInitialized()) {
12900            memoizedIsInitialized = 0;
12901            return false;
12902          }
12903          memoizedIsInitialized = 1;
12904          return true;
12905        }
12906        
12907        public void writeTo(com.google.protobuf.CodedOutputStream output)
12908                            throws java.io.IOException {
12909          getSerializedSize();
12910          if (((bitField0_ & 0x00000001) == 0x00000001)) {
12911            output.writeMessage(1, reqInfo_);
12912          }
12913          if (((bitField0_ & 0x00000002) == 0x00000002)) {
12914            output.writeMessage(2, stateToAccept_);
12915          }
12916          if (((bitField0_ & 0x00000004) == 0x00000004)) {
12917            output.writeBytes(3, getFromURLBytes());
12918          }
12919          getUnknownFields().writeTo(output);
12920        }
12921        
12922        private int memoizedSerializedSize = -1;
12923        public int getSerializedSize() {
12924          int size = memoizedSerializedSize;
12925          if (size != -1) return size;
12926        
12927          size = 0;
12928          if (((bitField0_ & 0x00000001) == 0x00000001)) {
12929            size += com.google.protobuf.CodedOutputStream
12930              .computeMessageSize(1, reqInfo_);
12931          }
12932          if (((bitField0_ & 0x00000002) == 0x00000002)) {
12933            size += com.google.protobuf.CodedOutputStream
12934              .computeMessageSize(2, stateToAccept_);
12935          }
12936          if (((bitField0_ & 0x00000004) == 0x00000004)) {
12937            size += com.google.protobuf.CodedOutputStream
12938              .computeBytesSize(3, getFromURLBytes());
12939          }
12940          size += getUnknownFields().getSerializedSize();
12941          memoizedSerializedSize = size;
12942          return size;
12943        }
12944        
12945        private static final long serialVersionUID = 0L;
12946        @java.lang.Override
12947        protected java.lang.Object writeReplace()
12948            throws java.io.ObjectStreamException {
12949          return super.writeReplace();
12950        }
12951        
12952        @java.lang.Override
12953        public boolean equals(final java.lang.Object obj) {
12954          if (obj == this) {
12955           return true;
12956          }
12957          if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto)) {
12958            return super.equals(obj);
12959          }
12960          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto) obj;
12961          
12962          boolean result = true;
12963          result = result && (hasReqInfo() == other.hasReqInfo());
12964          if (hasReqInfo()) {
12965            result = result && getReqInfo()
12966                .equals(other.getReqInfo());
12967          }
12968          result = result && (hasStateToAccept() == other.hasStateToAccept());
12969          if (hasStateToAccept()) {
12970            result = result && getStateToAccept()
12971                .equals(other.getStateToAccept());
12972          }
12973          result = result && (hasFromURL() == other.hasFromURL());
12974          if (hasFromURL()) {
12975            result = result && getFromURL()
12976                .equals(other.getFromURL());
12977          }
12978          result = result &&
12979              getUnknownFields().equals(other.getUnknownFields());
12980          return result;
12981        }
12982        
12983        @java.lang.Override
12984        public int hashCode() {
12985          int hash = 41;
12986          hash = (19 * hash) + getDescriptorForType().hashCode();
12987          if (hasReqInfo()) {
12988            hash = (37 * hash) + REQINFO_FIELD_NUMBER;
12989            hash = (53 * hash) + getReqInfo().hashCode();
12990          }
12991          if (hasStateToAccept()) {
12992            hash = (37 * hash) + STATETOACCEPT_FIELD_NUMBER;
12993            hash = (53 * hash) + getStateToAccept().hashCode();
12994          }
12995          if (hasFromURL()) {
12996            hash = (37 * hash) + FROMURL_FIELD_NUMBER;
12997            hash = (53 * hash) + getFromURL().hashCode();
12998          }
12999          hash = (29 * hash) + getUnknownFields().hashCode();
13000          return hash;
13001        }
13002        
13003        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto parseFrom(
13004            com.google.protobuf.ByteString data)
13005            throws com.google.protobuf.InvalidProtocolBufferException {
13006          return newBuilder().mergeFrom(data).buildParsed();
13007        }
13008        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto parseFrom(
13009            com.google.protobuf.ByteString data,
13010            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
13011            throws com.google.protobuf.InvalidProtocolBufferException {
13012          return newBuilder().mergeFrom(data, extensionRegistry)
13013                   .buildParsed();
13014        }
13015        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto parseFrom(byte[] data)
13016            throws com.google.protobuf.InvalidProtocolBufferException {
13017          return newBuilder().mergeFrom(data).buildParsed();
13018        }
13019        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto parseFrom(
13020            byte[] data,
13021            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
13022            throws com.google.protobuf.InvalidProtocolBufferException {
13023          return newBuilder().mergeFrom(data, extensionRegistry)
13024                   .buildParsed();
13025        }
13026        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto parseFrom(java.io.InputStream input)
13027            throws java.io.IOException {
13028          return newBuilder().mergeFrom(input).buildParsed();
13029        }
13030        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto parseFrom(
13031            java.io.InputStream input,
13032            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
13033            throws java.io.IOException {
13034          return newBuilder().mergeFrom(input, extensionRegistry)
13035                   .buildParsed();
13036        }
13037        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto parseDelimitedFrom(java.io.InputStream input)
13038            throws java.io.IOException {
13039          Builder builder = newBuilder();
13040          if (builder.mergeDelimitedFrom(input)) {
13041            return builder.buildParsed();
13042          } else {
13043            return null;
13044          }
13045        }
13046        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto parseDelimitedFrom(
13047            java.io.InputStream input,
13048            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
13049            throws java.io.IOException {
13050          Builder builder = newBuilder();
13051          if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
13052            return builder.buildParsed();
13053          } else {
13054            return null;
13055          }
13056        }
13057        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto parseFrom(
13058            com.google.protobuf.CodedInputStream input)
13059            throws java.io.IOException {
13060          return newBuilder().mergeFrom(input).buildParsed();
13061        }
13062        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto parseFrom(
13063            com.google.protobuf.CodedInputStream input,
13064            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
13065            throws java.io.IOException {
13066          return newBuilder().mergeFrom(input, extensionRegistry)
13067                   .buildParsed();
13068        }
13069        
13070        public static Builder newBuilder() { return Builder.create(); }
13071        public Builder newBuilderForType() { return newBuilder(); }
13072        public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto prototype) {
13073          return newBuilder().mergeFrom(prototype);
13074        }
13075        public Builder toBuilder() { return newBuilder(this); }
13076        
13077        @java.lang.Override
13078        protected Builder newBuilderForType(
13079            com.google.protobuf.GeneratedMessage.BuilderParent parent) {
13080          Builder builder = new Builder(parent);
13081          return builder;
13082        }
13083        public static final class Builder extends
13084            com.google.protobuf.GeneratedMessage.Builder<Builder>
13085           implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProtoOrBuilder {
13086          public static final com.google.protobuf.Descriptors.Descriptor
13087              getDescriptor() {
13088            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_AcceptRecoveryRequestProto_descriptor;
13089          }
13090          
13091          protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
13092              internalGetFieldAccessorTable() {
13093            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_AcceptRecoveryRequestProto_fieldAccessorTable;
13094          }
13095          
13096          // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto.newBuilder()
13097          private Builder() {
13098            maybeForceBuilderInitialization();
13099          }
13100          
13101          private Builder(BuilderParent parent) {
13102            super(parent);
13103            maybeForceBuilderInitialization();
13104          }
13105          private void maybeForceBuilderInitialization() {
13106            if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
13107              getReqInfoFieldBuilder();
13108              getStateToAcceptFieldBuilder();
13109            }
13110          }
13111          private static Builder create() {
13112            return new Builder();
13113          }
13114          
13115          public Builder clear() {
13116            super.clear();
13117            if (reqInfoBuilder_ == null) {
13118              reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
13119            } else {
13120              reqInfoBuilder_.clear();
13121            }
13122            bitField0_ = (bitField0_ & ~0x00000001);
13123            if (stateToAcceptBuilder_ == null) {
13124              stateToAccept_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance();
13125            } else {
13126              stateToAcceptBuilder_.clear();
13127            }
13128            bitField0_ = (bitField0_ & ~0x00000002);
13129            fromURL_ = "";
13130            bitField0_ = (bitField0_ & ~0x00000004);
13131            return this;
13132          }
13133          
13134          public Builder clone() {
13135            return create().mergeFrom(buildPartial());
13136          }
13137          
13138          public com.google.protobuf.Descriptors.Descriptor
13139              getDescriptorForType() {
13140            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto.getDescriptor();
13141          }
13142          
13143          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto getDefaultInstanceForType() {
13144            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto.getDefaultInstance();
13145          }
13146          
13147          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto build() {
13148            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto result = buildPartial();
13149            if (!result.isInitialized()) {
13150              throw newUninitializedMessageException(result);
13151            }
13152            return result;
13153          }
13154          
13155          private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto buildParsed()
13156              throws com.google.protobuf.InvalidProtocolBufferException {
13157            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto result = buildPartial();
13158            if (!result.isInitialized()) {
13159              throw newUninitializedMessageException(
13160                result).asInvalidProtocolBufferException();
13161            }
13162            return result;
13163          }
13164          
13165          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto buildPartial() {
13166            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto(this);
13167            int from_bitField0_ = bitField0_;
13168            int to_bitField0_ = 0;
13169            if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
13170              to_bitField0_ |= 0x00000001;
13171            }
13172            if (reqInfoBuilder_ == null) {
13173              result.reqInfo_ = reqInfo_;
13174            } else {
13175              result.reqInfo_ = reqInfoBuilder_.build();
13176            }
13177            if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
13178              to_bitField0_ |= 0x00000002;
13179            }
13180            if (stateToAcceptBuilder_ == null) {
13181              result.stateToAccept_ = stateToAccept_;
13182            } else {
13183              result.stateToAccept_ = stateToAcceptBuilder_.build();
13184            }
13185            if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
13186              to_bitField0_ |= 0x00000004;
13187            }
13188            result.fromURL_ = fromURL_;
13189            result.bitField0_ = to_bitField0_;
13190            onBuilt();
13191            return result;
13192          }
13193          
13194          public Builder mergeFrom(com.google.protobuf.Message other) {
13195            if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto) {
13196              return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto)other);
13197            } else {
13198              super.mergeFrom(other);
13199              return this;
13200            }
13201          }
13202          
13203          public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto other) {
13204            if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto.getDefaultInstance()) return this;
13205            if (other.hasReqInfo()) {
13206              mergeReqInfo(other.getReqInfo());
13207            }
13208            if (other.hasStateToAccept()) {
13209              mergeStateToAccept(other.getStateToAccept());
13210            }
13211            if (other.hasFromURL()) {
13212              setFromURL(other.getFromURL());
13213            }
13214            this.mergeUnknownFields(other.getUnknownFields());
13215            return this;
13216          }
13217          
13218          public final boolean isInitialized() {
13219            if (!hasReqInfo()) {
13220              
13221              return false;
13222            }
13223            if (!hasStateToAccept()) {
13224              
13225              return false;
13226            }
13227            if (!hasFromURL()) {
13228              
13229              return false;
13230            }
13231            if (!getReqInfo().isInitialized()) {
13232              
13233              return false;
13234            }
13235            if (!getStateToAccept().isInitialized()) {
13236              
13237              return false;
13238            }
13239            return true;
13240          }
13241          
13242          public Builder mergeFrom(
13243              com.google.protobuf.CodedInputStream input,
13244              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
13245              throws java.io.IOException {
13246            com.google.protobuf.UnknownFieldSet.Builder unknownFields =
13247              com.google.protobuf.UnknownFieldSet.newBuilder(
13248                this.getUnknownFields());
13249            while (true) {
13250              int tag = input.readTag();
13251              switch (tag) {
13252                case 0:
13253                  this.setUnknownFields(unknownFields.build());
13254                  onChanged();
13255                  return this;
13256                default: {
13257                  if (!parseUnknownField(input, unknownFields,
13258                                         extensionRegistry, tag)) {
13259                    this.setUnknownFields(unknownFields.build());
13260                    onChanged();
13261                    return this;
13262                  }
13263                  break;
13264                }
13265                case 10: {
13266                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder subBuilder = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.newBuilder();
13267                  if (hasReqInfo()) {
13268                    subBuilder.mergeFrom(getReqInfo());
13269                  }
13270                  input.readMessage(subBuilder, extensionRegistry);
13271                  setReqInfo(subBuilder.buildPartial());
13272                  break;
13273                }
13274                case 18: {
13275                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder subBuilder = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.newBuilder();
13276                  if (hasStateToAccept()) {
13277                    subBuilder.mergeFrom(getStateToAccept());
13278                  }
13279                  input.readMessage(subBuilder, extensionRegistry);
13280                  setStateToAccept(subBuilder.buildPartial());
13281                  break;
13282                }
13283                case 26: {
13284                  bitField0_ |= 0x00000004;
13285                  fromURL_ = input.readBytes();
13286                  break;
13287                }
13288              }
13289            }
13290          }
13291          
13292          private int bitField0_;
13293          
13294          // required .hadoop.hdfs.RequestInfoProto reqInfo = 1;
13295          private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
13296          private com.google.protobuf.SingleFieldBuilder<
13297              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder> reqInfoBuilder_;
13298          public boolean hasReqInfo() {
13299            return ((bitField0_ & 0x00000001) == 0x00000001);
13300          }
13301          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto getReqInfo() {
13302            if (reqInfoBuilder_ == null) {
13303              return reqInfo_;
13304            } else {
13305              return reqInfoBuilder_.getMessage();
13306            }
13307          }
13308          public Builder setReqInfo(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto value) {
13309            if (reqInfoBuilder_ == null) {
13310              if (value == null) {
13311                throw new NullPointerException();
13312              }
13313              reqInfo_ = value;
13314              onChanged();
13315            } else {
13316              reqInfoBuilder_.setMessage(value);
13317            }
13318            bitField0_ |= 0x00000001;
13319            return this;
13320          }
13321          public Builder setReqInfo(
13322              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder builderForValue) {
13323            if (reqInfoBuilder_ == null) {
13324              reqInfo_ = builderForValue.build();
13325              onChanged();
13326            } else {
13327              reqInfoBuilder_.setMessage(builderForValue.build());
13328            }
13329            bitField0_ |= 0x00000001;
13330            return this;
13331          }
13332          public Builder mergeReqInfo(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto value) {
13333            if (reqInfoBuilder_ == null) {
13334              if (((bitField0_ & 0x00000001) == 0x00000001) &&
13335                  reqInfo_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance()) {
13336                reqInfo_ =
13337                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.newBuilder(reqInfo_).mergeFrom(value).buildPartial();
13338              } else {
13339                reqInfo_ = value;
13340              }
13341              onChanged();
13342            } else {
13343              reqInfoBuilder_.mergeFrom(value);
13344            }
13345            bitField0_ |= 0x00000001;
13346            return this;
13347          }
13348          public Builder clearReqInfo() {
13349            if (reqInfoBuilder_ == null) {
13350              reqInfo_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.getDefaultInstance();
13351              onChanged();
13352            } else {
13353              reqInfoBuilder_.clear();
13354            }
13355            bitField0_ = (bitField0_ & ~0x00000001);
13356            return this;
13357          }
13358          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder getReqInfoBuilder() {
13359            bitField0_ |= 0x00000001;
13360            onChanged();
13361            return getReqInfoFieldBuilder().getBuilder();
13362          }
13363          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder getReqInfoOrBuilder() {
13364            if (reqInfoBuilder_ != null) {
13365              return reqInfoBuilder_.getMessageOrBuilder();
13366            } else {
13367              return reqInfo_;
13368            }
13369          }
13370          private com.google.protobuf.SingleFieldBuilder<
13371              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder> 
13372              getReqInfoFieldBuilder() {
13373            if (reqInfoBuilder_ == null) {
13374              reqInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder<
13375                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProtoOrBuilder>(
13376                      reqInfo_,
13377                      getParentForChildren(),
13378                      isClean());
13379              reqInfo_ = null;
13380            }
13381            return reqInfoBuilder_;
13382          }
13383          
13384          // required .hadoop.hdfs.SegmentStateProto stateToAccept = 2;
13385          private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto stateToAccept_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance();
13386          private com.google.protobuf.SingleFieldBuilder<
13387              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder> stateToAcceptBuilder_;
13388          public boolean hasStateToAccept() {
13389            return ((bitField0_ & 0x00000002) == 0x00000002);
13390          }
13391          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto getStateToAccept() {
13392            if (stateToAcceptBuilder_ == null) {
13393              return stateToAccept_;
13394            } else {
13395              return stateToAcceptBuilder_.getMessage();
13396            }
13397          }
13398          public Builder setStateToAccept(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto value) {
13399            if (stateToAcceptBuilder_ == null) {
13400              if (value == null) {
13401                throw new NullPointerException();
13402              }
13403              stateToAccept_ = value;
13404              onChanged();
13405            } else {
13406              stateToAcceptBuilder_.setMessage(value);
13407            }
13408            bitField0_ |= 0x00000002;
13409            return this;
13410          }
13411          public Builder setStateToAccept(
13412              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder builderForValue) {
13413            if (stateToAcceptBuilder_ == null) {
13414              stateToAccept_ = builderForValue.build();
13415              onChanged();
13416            } else {
13417              stateToAcceptBuilder_.setMessage(builderForValue.build());
13418            }
13419            bitField0_ |= 0x00000002;
13420            return this;
13421          }
13422          public Builder mergeStateToAccept(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto value) {
13423            if (stateToAcceptBuilder_ == null) {
13424              if (((bitField0_ & 0x00000002) == 0x00000002) &&
13425                  stateToAccept_ != org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance()) {
13426                stateToAccept_ =
13427                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.newBuilder(stateToAccept_).mergeFrom(value).buildPartial();
13428              } else {
13429                stateToAccept_ = value;
13430              }
13431              onChanged();
13432            } else {
13433              stateToAcceptBuilder_.mergeFrom(value);
13434            }
13435            bitField0_ |= 0x00000002;
13436            return this;
13437          }
13438          public Builder clearStateToAccept() {
13439            if (stateToAcceptBuilder_ == null) {
13440              stateToAccept_ = org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.getDefaultInstance();
13441              onChanged();
13442            } else {
13443              stateToAcceptBuilder_.clear();
13444            }
13445            bitField0_ = (bitField0_ & ~0x00000002);
13446            return this;
13447          }
13448          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder getStateToAcceptBuilder() {
13449            bitField0_ |= 0x00000002;
13450            onChanged();
13451            return getStateToAcceptFieldBuilder().getBuilder();
13452          }
13453          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder getStateToAcceptOrBuilder() {
13454            if (stateToAcceptBuilder_ != null) {
13455              return stateToAcceptBuilder_.getMessageOrBuilder();
13456            } else {
13457              return stateToAccept_;
13458            }
13459          }
13460          private com.google.protobuf.SingleFieldBuilder<
13461              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder> 
13462              getStateToAcceptFieldBuilder() {
13463            if (stateToAcceptBuilder_ == null) {
13464              stateToAcceptBuilder_ = new com.google.protobuf.SingleFieldBuilder<
13465                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder, org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProtoOrBuilder>(
13466                      stateToAccept_,
13467                      getParentForChildren(),
13468                      isClean());
13469              stateToAccept_ = null;
13470            }
13471            return stateToAcceptBuilder_;
13472          }
13473          
13474          // required string fromURL = 3;
13475          private java.lang.Object fromURL_ = "";
13476          public boolean hasFromURL() {
13477            return ((bitField0_ & 0x00000004) == 0x00000004);
13478          }
13479          public String getFromURL() {
13480            java.lang.Object ref = fromURL_;
13481            if (!(ref instanceof String)) {
13482              String s = ((com.google.protobuf.ByteString) ref).toStringUtf8();
13483              fromURL_ = s;
13484              return s;
13485            } else {
13486              return (String) ref;
13487            }
13488          }
13489          public Builder setFromURL(String value) {
13490            if (value == null) {
13491        throw new NullPointerException();
13492      }
13493      bitField0_ |= 0x00000004;
13494            fromURL_ = value;
13495            onChanged();
13496            return this;
13497          }
13498          public Builder clearFromURL() {
13499            bitField0_ = (bitField0_ & ~0x00000004);
13500            fromURL_ = getDefaultInstance().getFromURL();
13501            onChanged();
13502            return this;
13503          }
13504          void setFromURL(com.google.protobuf.ByteString value) {
13505            bitField0_ |= 0x00000004;
13506            fromURL_ = value;
13507            onChanged();
13508          }
13509          
13510          // @@protoc_insertion_point(builder_scope:hadoop.hdfs.AcceptRecoveryRequestProto)
13511        }
13512        
13513        static {
13514          defaultInstance = new AcceptRecoveryRequestProto(true);
13515          defaultInstance.initFields();
13516        }
13517        
13518        // @@protoc_insertion_point(class_scope:hadoop.hdfs.AcceptRecoveryRequestProto)
13519      }
13520      
13521      public interface AcceptRecoveryResponseProtoOrBuilder
13522          extends com.google.protobuf.MessageOrBuilder {
13523      }
13524      public static final class AcceptRecoveryResponseProto extends
13525          com.google.protobuf.GeneratedMessage
13526          implements AcceptRecoveryResponseProtoOrBuilder {
13527        // Use AcceptRecoveryResponseProto.newBuilder() to construct.
13528        private AcceptRecoveryResponseProto(Builder builder) {
13529          super(builder);
13530        }
13531        private AcceptRecoveryResponseProto(boolean noInit) {}
13532        
13533        private static final AcceptRecoveryResponseProto defaultInstance;
13534        public static AcceptRecoveryResponseProto getDefaultInstance() {
13535          return defaultInstance;
13536        }
13537        
13538        public AcceptRecoveryResponseProto getDefaultInstanceForType() {
13539          return defaultInstance;
13540        }
13541        
13542        public static final com.google.protobuf.Descriptors.Descriptor
13543            getDescriptor() {
13544          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_AcceptRecoveryResponseProto_descriptor;
13545        }
13546        
13547        protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
13548            internalGetFieldAccessorTable() {
13549          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_AcceptRecoveryResponseProto_fieldAccessorTable;
13550        }
13551        
13552        private void initFields() {
13553        }
13554        private byte memoizedIsInitialized = -1;
13555        public final boolean isInitialized() {
13556          byte isInitialized = memoizedIsInitialized;
13557          if (isInitialized != -1) return isInitialized == 1;
13558          
13559          memoizedIsInitialized = 1;
13560          return true;
13561        }
13562        
13563        public void writeTo(com.google.protobuf.CodedOutputStream output)
13564                            throws java.io.IOException {
13565          getSerializedSize();
13566          getUnknownFields().writeTo(output);
13567        }
13568        
13569        private int memoizedSerializedSize = -1;
13570        public int getSerializedSize() {
13571          int size = memoizedSerializedSize;
13572          if (size != -1) return size;
13573        
13574          size = 0;
13575          size += getUnknownFields().getSerializedSize();
13576          memoizedSerializedSize = size;
13577          return size;
13578        }
13579        
13580        private static final long serialVersionUID = 0L;
13581        @java.lang.Override
13582        protected java.lang.Object writeReplace()
13583            throws java.io.ObjectStreamException {
13584          return super.writeReplace();
13585        }
13586        
13587        @java.lang.Override
13588        public boolean equals(final java.lang.Object obj) {
13589          if (obj == this) {
13590           return true;
13591          }
13592          if (!(obj instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto)) {
13593            return super.equals(obj);
13594          }
13595          org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto other = (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto) obj;
13596          
13597          boolean result = true;
13598          result = result &&
13599              getUnknownFields().equals(other.getUnknownFields());
13600          return result;
13601        }
13602        
13603        @java.lang.Override
13604        public int hashCode() {
13605          int hash = 41;
13606          hash = (19 * hash) + getDescriptorForType().hashCode();
13607          hash = (29 * hash) + getUnknownFields().hashCode();
13608          return hash;
13609        }
13610        
13611        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto parseFrom(
13612            com.google.protobuf.ByteString data)
13613            throws com.google.protobuf.InvalidProtocolBufferException {
13614          return newBuilder().mergeFrom(data).buildParsed();
13615        }
13616        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto parseFrom(
13617            com.google.protobuf.ByteString data,
13618            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
13619            throws com.google.protobuf.InvalidProtocolBufferException {
13620          return newBuilder().mergeFrom(data, extensionRegistry)
13621                   .buildParsed();
13622        }
13623        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto parseFrom(byte[] data)
13624            throws com.google.protobuf.InvalidProtocolBufferException {
13625          return newBuilder().mergeFrom(data).buildParsed();
13626        }
13627        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto parseFrom(
13628            byte[] data,
13629            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
13630            throws com.google.protobuf.InvalidProtocolBufferException {
13631          return newBuilder().mergeFrom(data, extensionRegistry)
13632                   .buildParsed();
13633        }
13634        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto parseFrom(java.io.InputStream input)
13635            throws java.io.IOException {
13636          return newBuilder().mergeFrom(input).buildParsed();
13637        }
13638        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto parseFrom(
13639            java.io.InputStream input,
13640            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
13641            throws java.io.IOException {
13642          return newBuilder().mergeFrom(input, extensionRegistry)
13643                   .buildParsed();
13644        }
13645        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto parseDelimitedFrom(java.io.InputStream input)
13646            throws java.io.IOException {
13647          Builder builder = newBuilder();
13648          if (builder.mergeDelimitedFrom(input)) {
13649            return builder.buildParsed();
13650          } else {
13651            return null;
13652          }
13653        }
13654        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto parseDelimitedFrom(
13655            java.io.InputStream input,
13656            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
13657            throws java.io.IOException {
13658          Builder builder = newBuilder();
13659          if (builder.mergeDelimitedFrom(input, extensionRegistry)) {
13660            return builder.buildParsed();
13661          } else {
13662            return null;
13663          }
13664        }
13665        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto parseFrom(
13666            com.google.protobuf.CodedInputStream input)
13667            throws java.io.IOException {
13668          return newBuilder().mergeFrom(input).buildParsed();
13669        }
13670        public static org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto parseFrom(
13671            com.google.protobuf.CodedInputStream input,
13672            com.google.protobuf.ExtensionRegistryLite extensionRegistry)
13673            throws java.io.IOException {
13674          return newBuilder().mergeFrom(input, extensionRegistry)
13675                   .buildParsed();
13676        }
13677        
13678        public static Builder newBuilder() { return Builder.create(); }
13679        public Builder newBuilderForType() { return newBuilder(); }
13680        public static Builder newBuilder(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto prototype) {
13681          return newBuilder().mergeFrom(prototype);
13682        }
13683        public Builder toBuilder() { return newBuilder(this); }
13684        
13685        @java.lang.Override
13686        protected Builder newBuilderForType(
13687            com.google.protobuf.GeneratedMessage.BuilderParent parent) {
13688          Builder builder = new Builder(parent);
13689          return builder;
13690        }
13691        public static final class Builder extends
13692            com.google.protobuf.GeneratedMessage.Builder<Builder>
13693           implements org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProtoOrBuilder {
13694          public static final com.google.protobuf.Descriptors.Descriptor
13695              getDescriptor() {
13696            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_AcceptRecoveryResponseProto_descriptor;
13697          }
13698          
13699          protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
13700              internalGetFieldAccessorTable() {
13701            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.internal_static_hadoop_hdfs_AcceptRecoveryResponseProto_fieldAccessorTable;
13702          }
13703          
13704          // Construct using org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto.newBuilder()
13705          private Builder() {
13706            maybeForceBuilderInitialization();
13707          }
13708          
13709          private Builder(BuilderParent parent) {
13710            super(parent);
13711            maybeForceBuilderInitialization();
13712          }
13713          private void maybeForceBuilderInitialization() {
13714            if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
13715            }
13716          }
13717          private static Builder create() {
13718            return new Builder();
13719          }
13720          
13721          public Builder clear() {
13722            super.clear();
13723            return this;
13724          }
13725          
13726          public Builder clone() {
13727            return create().mergeFrom(buildPartial());
13728          }
13729          
13730          public com.google.protobuf.Descriptors.Descriptor
13731              getDescriptorForType() {
13732            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto.getDescriptor();
13733          }
13734          
13735          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto getDefaultInstanceForType() {
13736            return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto.getDefaultInstance();
13737          }
13738          
13739          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto build() {
13740            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto result = buildPartial();
13741            if (!result.isInitialized()) {
13742              throw newUninitializedMessageException(result);
13743            }
13744            return result;
13745          }
13746          
13747          private org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto buildParsed()
13748              throws com.google.protobuf.InvalidProtocolBufferException {
13749            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto result = buildPartial();
13750            if (!result.isInitialized()) {
13751              throw newUninitializedMessageException(
13752                result).asInvalidProtocolBufferException();
13753            }
13754            return result;
13755          }
13756          
13757          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto buildPartial() {
13758            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto result = new org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto(this);
13759            onBuilt();
13760            return result;
13761          }
13762          
13763          public Builder mergeFrom(com.google.protobuf.Message other) {
13764            if (other instanceof org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto) {
13765              return mergeFrom((org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto)other);
13766            } else {
13767              super.mergeFrom(other);
13768              return this;
13769            }
13770          }
13771          
13772          public Builder mergeFrom(org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto other) {
13773            if (other == org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto.getDefaultInstance()) return this;
13774            this.mergeUnknownFields(other.getUnknownFields());
13775            return this;
13776          }
13777          
13778          public final boolean isInitialized() {
13779            return true;
13780          }
13781          
13782          public Builder mergeFrom(
13783              com.google.protobuf.CodedInputStream input,
13784              com.google.protobuf.ExtensionRegistryLite extensionRegistry)
13785              throws java.io.IOException {
13786            com.google.protobuf.UnknownFieldSet.Builder unknownFields =
13787              com.google.protobuf.UnknownFieldSet.newBuilder(
13788                this.getUnknownFields());
13789            while (true) {
13790              int tag = input.readTag();
13791              switch (tag) {
13792                case 0:
13793                  this.setUnknownFields(unknownFields.build());
13794                  onChanged();
13795                  return this;
13796                default: {
13797                  if (!parseUnknownField(input, unknownFields,
13798                                         extensionRegistry, tag)) {
13799                    this.setUnknownFields(unknownFields.build());
13800                    onChanged();
13801                    return this;
13802                  }
13803                  break;
13804                }
13805              }
13806            }
13807          }
13808          
13809          
13810          // @@protoc_insertion_point(builder_scope:hadoop.hdfs.AcceptRecoveryResponseProto)
13811        }
13812        
13813        static {
13814          defaultInstance = new AcceptRecoveryResponseProto(true);
13815          defaultInstance.initFields();
13816        }
13817        
13818        // @@protoc_insertion_point(class_scope:hadoop.hdfs.AcceptRecoveryResponseProto)
13819      }
13820      
13821      public static abstract class QJournalProtocolService
13822          implements com.google.protobuf.Service {
13823        protected QJournalProtocolService() {}
13824        
13825        public interface Interface {
13826          public abstract void isFormatted(
13827              com.google.protobuf.RpcController controller,
13828              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto request,
13829              com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto> done);
13830          
13831          public abstract void getJournalState(
13832              com.google.protobuf.RpcController controller,
13833              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto request,
13834              com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto> done);
13835          
13836          public abstract void newEpoch(
13837              com.google.protobuf.RpcController controller,
13838              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto request,
13839              com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto> done);
13840          
13841          public abstract void format(
13842              com.google.protobuf.RpcController controller,
13843              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto request,
13844              com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto> done);
13845          
13846          public abstract void journal(
13847              com.google.protobuf.RpcController controller,
13848              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto request,
13849              com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto> done);
13850          
13851          public abstract void heartbeat(
13852              com.google.protobuf.RpcController controller,
13853              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto request,
13854              com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto> done);
13855          
13856          public abstract void startLogSegment(
13857              com.google.protobuf.RpcController controller,
13858              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto request,
13859              com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto> done);
13860          
13861          public abstract void finalizeLogSegment(
13862              com.google.protobuf.RpcController controller,
13863              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto request,
13864              com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto> done);
13865          
13866          public abstract void purgeLogs(
13867              com.google.protobuf.RpcController controller,
13868              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto request,
13869              com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto> done);
13870          
13871          public abstract void getEditLogManifest(
13872              com.google.protobuf.RpcController controller,
13873              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto request,
13874              com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto> done);
13875          
13876          public abstract void prepareRecovery(
13877              com.google.protobuf.RpcController controller,
13878              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto request,
13879              com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto> done);
13880          
13881          public abstract void acceptRecovery(
13882              com.google.protobuf.RpcController controller,
13883              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto request,
13884              com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto> done);
13885          
13886        }
13887        
13888        public static com.google.protobuf.Service newReflectiveService(
13889            final Interface impl) {
13890          return new QJournalProtocolService() {
13891            @java.lang.Override
13892            public  void isFormatted(
13893                com.google.protobuf.RpcController controller,
13894                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto request,
13895                com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto> done) {
13896              impl.isFormatted(controller, request, done);
13897            }
13898            
13899            @java.lang.Override
13900            public  void getJournalState(
13901                com.google.protobuf.RpcController controller,
13902                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto request,
13903                com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto> done) {
13904              impl.getJournalState(controller, request, done);
13905            }
13906            
13907            @java.lang.Override
13908            public  void newEpoch(
13909                com.google.protobuf.RpcController controller,
13910                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto request,
13911                com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto> done) {
13912              impl.newEpoch(controller, request, done);
13913            }
13914            
13915            @java.lang.Override
13916            public  void format(
13917                com.google.protobuf.RpcController controller,
13918                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto request,
13919                com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto> done) {
13920              impl.format(controller, request, done);
13921            }
13922            
13923            @java.lang.Override
13924            public  void journal(
13925                com.google.protobuf.RpcController controller,
13926                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto request,
13927                com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto> done) {
13928              impl.journal(controller, request, done);
13929            }
13930            
13931            @java.lang.Override
13932            public  void heartbeat(
13933                com.google.protobuf.RpcController controller,
13934                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto request,
13935                com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto> done) {
13936              impl.heartbeat(controller, request, done);
13937            }
13938            
13939            @java.lang.Override
13940            public  void startLogSegment(
13941                com.google.protobuf.RpcController controller,
13942                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto request,
13943                com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto> done) {
13944              impl.startLogSegment(controller, request, done);
13945            }
13946            
13947            @java.lang.Override
13948            public  void finalizeLogSegment(
13949                com.google.protobuf.RpcController controller,
13950                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto request,
13951                com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto> done) {
13952              impl.finalizeLogSegment(controller, request, done);
13953            }
13954            
13955            @java.lang.Override
13956            public  void purgeLogs(
13957                com.google.protobuf.RpcController controller,
13958                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto request,
13959                com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto> done) {
13960              impl.purgeLogs(controller, request, done);
13961            }
13962            
13963            @java.lang.Override
13964            public  void getEditLogManifest(
13965                com.google.protobuf.RpcController controller,
13966                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto request,
13967                com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto> done) {
13968              impl.getEditLogManifest(controller, request, done);
13969            }
13970            
13971            @java.lang.Override
13972            public  void prepareRecovery(
13973                com.google.protobuf.RpcController controller,
13974                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto request,
13975                com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto> done) {
13976              impl.prepareRecovery(controller, request, done);
13977            }
13978            
13979            @java.lang.Override
13980            public  void acceptRecovery(
13981                com.google.protobuf.RpcController controller,
13982                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto request,
13983                com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto> done) {
13984              impl.acceptRecovery(controller, request, done);
13985            }
13986            
13987          };
13988        }
13989        
13990        public static com.google.protobuf.BlockingService
13991            newReflectiveBlockingService(final BlockingInterface impl) {
13992          return new com.google.protobuf.BlockingService() {
13993            public final com.google.protobuf.Descriptors.ServiceDescriptor
13994                getDescriptorForType() {
13995              return getDescriptor();
13996            }
13997            
13998            public final com.google.protobuf.Message callBlockingMethod(
13999                com.google.protobuf.Descriptors.MethodDescriptor method,
14000                com.google.protobuf.RpcController controller,
14001                com.google.protobuf.Message request)
14002                throws com.google.protobuf.ServiceException {
14003              if (method.getService() != getDescriptor()) {
14004                throw new java.lang.IllegalArgumentException(
14005                  "Service.callBlockingMethod() given method descriptor for " +
14006                  "wrong service type.");
14007              }
14008              switch(method.getIndex()) {
14009                case 0:
14010                  return impl.isFormatted(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto)request);
14011                case 1:
14012                  return impl.getJournalState(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto)request);
14013                case 2:
14014                  return impl.newEpoch(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto)request);
14015                case 3:
14016                  return impl.format(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto)request);
14017                case 4:
14018                  return impl.journal(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto)request);
14019                case 5:
14020                  return impl.heartbeat(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto)request);
14021                case 6:
14022                  return impl.startLogSegment(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto)request);
14023                case 7:
14024                  return impl.finalizeLogSegment(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto)request);
14025                case 8:
14026                  return impl.purgeLogs(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto)request);
14027                case 9:
14028                  return impl.getEditLogManifest(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto)request);
14029                case 10:
14030                  return impl.prepareRecovery(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto)request);
14031                case 11:
14032                  return impl.acceptRecovery(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto)request);
14033                default:
14034                  throw new java.lang.AssertionError("Can't get here.");
14035              }
14036            }
14037            
14038            public final com.google.protobuf.Message
14039                getRequestPrototype(
14040                com.google.protobuf.Descriptors.MethodDescriptor method) {
14041              if (method.getService() != getDescriptor()) {
14042                throw new java.lang.IllegalArgumentException(
14043                  "Service.getRequestPrototype() given method " +
14044                  "descriptor for wrong service type.");
14045              }
14046              switch(method.getIndex()) {
14047                case 0:
14048                  return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto.getDefaultInstance();
14049                case 1:
14050                  return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto.getDefaultInstance();
14051                case 2:
14052                  return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto.getDefaultInstance();
14053                case 3:
14054                  return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto.getDefaultInstance();
14055                case 4:
14056                  return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto.getDefaultInstance();
14057                case 5:
14058                  return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto.getDefaultInstance();
14059                case 6:
14060                  return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto.getDefaultInstance();
14061                case 7:
14062                  return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto.getDefaultInstance();
14063                case 8:
14064                  return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto.getDefaultInstance();
14065                case 9:
14066                  return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto.getDefaultInstance();
14067                case 10:
14068                  return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto.getDefaultInstance();
14069                case 11:
14070                  return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto.getDefaultInstance();
14071                default:
14072                  throw new java.lang.AssertionError("Can't get here.");
14073              }
14074            }
14075            
14076            public final com.google.protobuf.Message
14077                getResponsePrototype(
14078                com.google.protobuf.Descriptors.MethodDescriptor method) {
14079              if (method.getService() != getDescriptor()) {
14080                throw new java.lang.IllegalArgumentException(
14081                  "Service.getResponsePrototype() given method " +
14082                  "descriptor for wrong service type.");
14083              }
14084              switch(method.getIndex()) {
14085                case 0:
14086                  return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto.getDefaultInstance();
14087                case 1:
14088                  return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto.getDefaultInstance();
14089                case 2:
14090                  return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto.getDefaultInstance();
14091                case 3:
14092                  return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto.getDefaultInstance();
14093                case 4:
14094                  return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto.getDefaultInstance();
14095                case 5:
14096                  return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto.getDefaultInstance();
14097                case 6:
14098                  return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto.getDefaultInstance();
14099                case 7:
14100                  return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto.getDefaultInstance();
14101                case 8:
14102                  return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto.getDefaultInstance();
14103                case 9:
14104                  return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto.getDefaultInstance();
14105                case 10:
14106                  return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto.getDefaultInstance();
14107                case 11:
14108                  return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto.getDefaultInstance();
14109                default:
14110                  throw new java.lang.AssertionError("Can't get here.");
14111              }
14112            }
14113            
14114          };
14115        }
14116        
14117        public abstract void isFormatted(
14118            com.google.protobuf.RpcController controller,
14119            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto request,
14120            com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto> done);
14121        
14122        public abstract void getJournalState(
14123            com.google.protobuf.RpcController controller,
14124            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto request,
14125            com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto> done);
14126        
14127        public abstract void newEpoch(
14128            com.google.protobuf.RpcController controller,
14129            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto request,
14130            com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto> done);
14131        
14132        public abstract void format(
14133            com.google.protobuf.RpcController controller,
14134            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto request,
14135            com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto> done);
14136        
14137        public abstract void journal(
14138            com.google.protobuf.RpcController controller,
14139            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto request,
14140            com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto> done);
14141        
14142        public abstract void heartbeat(
14143            com.google.protobuf.RpcController controller,
14144            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto request,
14145            com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto> done);
14146        
14147        public abstract void startLogSegment(
14148            com.google.protobuf.RpcController controller,
14149            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto request,
14150            com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto> done);
14151        
14152        public abstract void finalizeLogSegment(
14153            com.google.protobuf.RpcController controller,
14154            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto request,
14155            com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto> done);
14156        
14157        public abstract void purgeLogs(
14158            com.google.protobuf.RpcController controller,
14159            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto request,
14160            com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto> done);
14161        
14162        public abstract void getEditLogManifest(
14163            com.google.protobuf.RpcController controller,
14164            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto request,
14165            com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto> done);
14166        
14167        public abstract void prepareRecovery(
14168            com.google.protobuf.RpcController controller,
14169            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto request,
14170            com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto> done);
14171        
14172        public abstract void acceptRecovery(
14173            com.google.protobuf.RpcController controller,
14174            org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto request,
14175            com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto> done);
14176        
14177        public static final
14178            com.google.protobuf.Descriptors.ServiceDescriptor
14179            getDescriptor() {
14180          return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.getDescriptor().getServices().get(0);
14181        }
14182        public final com.google.protobuf.Descriptors.ServiceDescriptor
14183            getDescriptorForType() {
14184          return getDescriptor();
14185        }
14186        
14187        public final void callMethod(
14188            com.google.protobuf.Descriptors.MethodDescriptor method,
14189            com.google.protobuf.RpcController controller,
14190            com.google.protobuf.Message request,
14191            com.google.protobuf.RpcCallback<
14192              com.google.protobuf.Message> done) {
14193          if (method.getService() != getDescriptor()) {
14194            throw new java.lang.IllegalArgumentException(
14195              "Service.callMethod() given method descriptor for wrong " +
14196              "service type.");
14197          }
14198          switch(method.getIndex()) {
14199            case 0:
14200              this.isFormatted(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto)request,
14201                com.google.protobuf.RpcUtil.<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto>specializeCallback(
14202                  done));
14203              return;
14204            case 1:
14205              this.getJournalState(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto)request,
14206                com.google.protobuf.RpcUtil.<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto>specializeCallback(
14207                  done));
14208              return;
14209            case 2:
14210              this.newEpoch(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto)request,
14211                com.google.protobuf.RpcUtil.<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto>specializeCallback(
14212                  done));
14213              return;
14214            case 3:
14215              this.format(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto)request,
14216                com.google.protobuf.RpcUtil.<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto>specializeCallback(
14217                  done));
14218              return;
14219            case 4:
14220              this.journal(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto)request,
14221                com.google.protobuf.RpcUtil.<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto>specializeCallback(
14222                  done));
14223              return;
14224            case 5:
14225              this.heartbeat(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto)request,
14226                com.google.protobuf.RpcUtil.<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto>specializeCallback(
14227                  done));
14228              return;
14229            case 6:
14230              this.startLogSegment(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto)request,
14231                com.google.protobuf.RpcUtil.<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto>specializeCallback(
14232                  done));
14233              return;
14234            case 7:
14235              this.finalizeLogSegment(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto)request,
14236                com.google.protobuf.RpcUtil.<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto>specializeCallback(
14237                  done));
14238              return;
14239            case 8:
14240              this.purgeLogs(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto)request,
14241                com.google.protobuf.RpcUtil.<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto>specializeCallback(
14242                  done));
14243              return;
14244            case 9:
14245              this.getEditLogManifest(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto)request,
14246                com.google.protobuf.RpcUtil.<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto>specializeCallback(
14247                  done));
14248              return;
14249            case 10:
14250              this.prepareRecovery(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto)request,
14251                com.google.protobuf.RpcUtil.<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto>specializeCallback(
14252                  done));
14253              return;
14254            case 11:
14255              this.acceptRecovery(controller, (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto)request,
14256                com.google.protobuf.RpcUtil.<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto>specializeCallback(
14257                  done));
14258              return;
14259            default:
14260              throw new java.lang.AssertionError("Can't get here.");
14261          }
14262        }
14263        
14264        public final com.google.protobuf.Message
14265            getRequestPrototype(
14266            com.google.protobuf.Descriptors.MethodDescriptor method) {
14267          if (method.getService() != getDescriptor()) {
14268            throw new java.lang.IllegalArgumentException(
14269              "Service.getRequestPrototype() given method " +
14270              "descriptor for wrong service type.");
14271          }
14272          switch(method.getIndex()) {
14273            case 0:
14274              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto.getDefaultInstance();
14275            case 1:
14276              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto.getDefaultInstance();
14277            case 2:
14278              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto.getDefaultInstance();
14279            case 3:
14280              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto.getDefaultInstance();
14281            case 4:
14282              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto.getDefaultInstance();
14283            case 5:
14284              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto.getDefaultInstance();
14285            case 6:
14286              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto.getDefaultInstance();
14287            case 7:
14288              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto.getDefaultInstance();
14289            case 8:
14290              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto.getDefaultInstance();
14291            case 9:
14292              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto.getDefaultInstance();
14293            case 10:
14294              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto.getDefaultInstance();
14295            case 11:
14296              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto.getDefaultInstance();
14297            default:
14298              throw new java.lang.AssertionError("Can't get here.");
14299          }
14300        }
14301        
14302        public final com.google.protobuf.Message
14303            getResponsePrototype(
14304            com.google.protobuf.Descriptors.MethodDescriptor method) {
14305          if (method.getService() != getDescriptor()) {
14306            throw new java.lang.IllegalArgumentException(
14307              "Service.getResponsePrototype() given method " +
14308              "descriptor for wrong service type.");
14309          }
14310          switch(method.getIndex()) {
14311            case 0:
14312              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto.getDefaultInstance();
14313            case 1:
14314              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto.getDefaultInstance();
14315            case 2:
14316              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto.getDefaultInstance();
14317            case 3:
14318              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto.getDefaultInstance();
14319            case 4:
14320              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto.getDefaultInstance();
14321            case 5:
14322              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto.getDefaultInstance();
14323            case 6:
14324              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto.getDefaultInstance();
14325            case 7:
14326              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto.getDefaultInstance();
14327            case 8:
14328              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto.getDefaultInstance();
14329            case 9:
14330              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto.getDefaultInstance();
14331            case 10:
14332              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto.getDefaultInstance();
14333            case 11:
14334              return org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto.getDefaultInstance();
14335            default:
14336              throw new java.lang.AssertionError("Can't get here.");
14337          }
14338        }
14339        
14340        public static Stub newStub(
14341            com.google.protobuf.RpcChannel channel) {
14342          return new Stub(channel);
14343        }
14344        
14345        public static final class Stub extends org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.QJournalProtocolService implements Interface {
14346          private Stub(com.google.protobuf.RpcChannel channel) {
14347            this.channel = channel;
14348          }
14349          
14350          private final com.google.protobuf.RpcChannel channel;
14351          
14352          public com.google.protobuf.RpcChannel getChannel() {
14353            return channel;
14354          }
14355          
14356          public  void isFormatted(
14357              com.google.protobuf.RpcController controller,
14358              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto request,
14359              com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto> done) {
14360            channel.callMethod(
14361              getDescriptor().getMethods().get(0),
14362              controller,
14363              request,
14364              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto.getDefaultInstance(),
14365              com.google.protobuf.RpcUtil.generalizeCallback(
14366                done,
14367                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto.class,
14368                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto.getDefaultInstance()));
14369          }
14370          
14371          public  void getJournalState(
14372              com.google.protobuf.RpcController controller,
14373              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto request,
14374              com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto> done) {
14375            channel.callMethod(
14376              getDescriptor().getMethods().get(1),
14377              controller,
14378              request,
14379              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto.getDefaultInstance(),
14380              com.google.protobuf.RpcUtil.generalizeCallback(
14381                done,
14382                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto.class,
14383                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto.getDefaultInstance()));
14384          }
14385          
14386          public  void newEpoch(
14387              com.google.protobuf.RpcController controller,
14388              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto request,
14389              com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto> done) {
14390            channel.callMethod(
14391              getDescriptor().getMethods().get(2),
14392              controller,
14393              request,
14394              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto.getDefaultInstance(),
14395              com.google.protobuf.RpcUtil.generalizeCallback(
14396                done,
14397                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto.class,
14398                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto.getDefaultInstance()));
14399          }
14400          
14401          public  void format(
14402              com.google.protobuf.RpcController controller,
14403              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto request,
14404              com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto> done) {
14405            channel.callMethod(
14406              getDescriptor().getMethods().get(3),
14407              controller,
14408              request,
14409              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto.getDefaultInstance(),
14410              com.google.protobuf.RpcUtil.generalizeCallback(
14411                done,
14412                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto.class,
14413                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto.getDefaultInstance()));
14414          }
14415          
14416          public  void journal(
14417              com.google.protobuf.RpcController controller,
14418              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto request,
14419              com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto> done) {
14420            channel.callMethod(
14421              getDescriptor().getMethods().get(4),
14422              controller,
14423              request,
14424              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto.getDefaultInstance(),
14425              com.google.protobuf.RpcUtil.generalizeCallback(
14426                done,
14427                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto.class,
14428                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto.getDefaultInstance()));
14429          }
14430          
14431          public  void heartbeat(
14432              com.google.protobuf.RpcController controller,
14433              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto request,
14434              com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto> done) {
14435            channel.callMethod(
14436              getDescriptor().getMethods().get(5),
14437              controller,
14438              request,
14439              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto.getDefaultInstance(),
14440              com.google.protobuf.RpcUtil.generalizeCallback(
14441                done,
14442                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto.class,
14443                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto.getDefaultInstance()));
14444          }
14445          
14446          public  void startLogSegment(
14447              com.google.protobuf.RpcController controller,
14448              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto request,
14449              com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto> done) {
14450            channel.callMethod(
14451              getDescriptor().getMethods().get(6),
14452              controller,
14453              request,
14454              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto.getDefaultInstance(),
14455              com.google.protobuf.RpcUtil.generalizeCallback(
14456                done,
14457                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto.class,
14458                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto.getDefaultInstance()));
14459          }
14460          
14461          public  void finalizeLogSegment(
14462              com.google.protobuf.RpcController controller,
14463              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto request,
14464              com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto> done) {
14465            channel.callMethod(
14466              getDescriptor().getMethods().get(7),
14467              controller,
14468              request,
14469              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto.getDefaultInstance(),
14470              com.google.protobuf.RpcUtil.generalizeCallback(
14471                done,
14472                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto.class,
14473                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto.getDefaultInstance()));
14474          }
14475          
14476          public  void purgeLogs(
14477              com.google.protobuf.RpcController controller,
14478              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto request,
14479              com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto> done) {
14480            channel.callMethod(
14481              getDescriptor().getMethods().get(8),
14482              controller,
14483              request,
14484              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto.getDefaultInstance(),
14485              com.google.protobuf.RpcUtil.generalizeCallback(
14486                done,
14487                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto.class,
14488                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto.getDefaultInstance()));
14489          }
14490          
14491          public  void getEditLogManifest(
14492              com.google.protobuf.RpcController controller,
14493              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto request,
14494              com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto> done) {
14495            channel.callMethod(
14496              getDescriptor().getMethods().get(9),
14497              controller,
14498              request,
14499              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto.getDefaultInstance(),
14500              com.google.protobuf.RpcUtil.generalizeCallback(
14501                done,
14502                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto.class,
14503                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto.getDefaultInstance()));
14504          }
14505          
14506          public  void prepareRecovery(
14507              com.google.protobuf.RpcController controller,
14508              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto request,
14509              com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto> done) {
14510            channel.callMethod(
14511              getDescriptor().getMethods().get(10),
14512              controller,
14513              request,
14514              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto.getDefaultInstance(),
14515              com.google.protobuf.RpcUtil.generalizeCallback(
14516                done,
14517                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto.class,
14518                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto.getDefaultInstance()));
14519          }
14520          
14521          public  void acceptRecovery(
14522              com.google.protobuf.RpcController controller,
14523              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto request,
14524              com.google.protobuf.RpcCallback<org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto> done) {
14525            channel.callMethod(
14526              getDescriptor().getMethods().get(11),
14527              controller,
14528              request,
14529              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto.getDefaultInstance(),
14530              com.google.protobuf.RpcUtil.generalizeCallback(
14531                done,
14532                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto.class,
14533                org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto.getDefaultInstance()));
14534          }
14535        }
14536        
14537        public static BlockingInterface newBlockingStub(
14538            com.google.protobuf.BlockingRpcChannel channel) {
14539          return new BlockingStub(channel);
14540        }
14541        
14542        public interface BlockingInterface {
14543          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto isFormatted(
14544              com.google.protobuf.RpcController controller,
14545              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto request)
14546              throws com.google.protobuf.ServiceException;
14547          
14548          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto getJournalState(
14549              com.google.protobuf.RpcController controller,
14550              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto request)
14551              throws com.google.protobuf.ServiceException;
14552          
14553          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto newEpoch(
14554              com.google.protobuf.RpcController controller,
14555              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto request)
14556              throws com.google.protobuf.ServiceException;
14557          
14558          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto format(
14559              com.google.protobuf.RpcController controller,
14560              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto request)
14561              throws com.google.protobuf.ServiceException;
14562          
14563          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto journal(
14564              com.google.protobuf.RpcController controller,
14565              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto request)
14566              throws com.google.protobuf.ServiceException;
14567          
14568          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto heartbeat(
14569              com.google.protobuf.RpcController controller,
14570              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto request)
14571              throws com.google.protobuf.ServiceException;
14572          
14573          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto startLogSegment(
14574              com.google.protobuf.RpcController controller,
14575              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto request)
14576              throws com.google.protobuf.ServiceException;
14577          
14578          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto finalizeLogSegment(
14579              com.google.protobuf.RpcController controller,
14580              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto request)
14581              throws com.google.protobuf.ServiceException;
14582          
14583          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto purgeLogs(
14584              com.google.protobuf.RpcController controller,
14585              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto request)
14586              throws com.google.protobuf.ServiceException;
14587          
14588          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto getEditLogManifest(
14589              com.google.protobuf.RpcController controller,
14590              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto request)
14591              throws com.google.protobuf.ServiceException;
14592          
14593          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto prepareRecovery(
14594              com.google.protobuf.RpcController controller,
14595              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto request)
14596              throws com.google.protobuf.ServiceException;
14597          
14598          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto acceptRecovery(
14599              com.google.protobuf.RpcController controller,
14600              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto request)
14601              throws com.google.protobuf.ServiceException;
14602        }
14603        
14604        private static final class BlockingStub implements BlockingInterface {
14605          private BlockingStub(com.google.protobuf.BlockingRpcChannel channel) {
14606            this.channel = channel;
14607          }
14608          
14609          private final com.google.protobuf.BlockingRpcChannel channel;
14610          
14611          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto isFormatted(
14612              com.google.protobuf.RpcController controller,
14613              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto request)
14614              throws com.google.protobuf.ServiceException {
14615            return (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto) channel.callBlockingMethod(
14616              getDescriptor().getMethods().get(0),
14617              controller,
14618              request,
14619              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto.getDefaultInstance());
14620          }
14621          
14622          
14623          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto getJournalState(
14624              com.google.protobuf.RpcController controller,
14625              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto request)
14626              throws com.google.protobuf.ServiceException {
14627            return (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto) channel.callBlockingMethod(
14628              getDescriptor().getMethods().get(1),
14629              controller,
14630              request,
14631              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto.getDefaultInstance());
14632          }
14633          
14634          
14635          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto newEpoch(
14636              com.google.protobuf.RpcController controller,
14637              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto request)
14638              throws com.google.protobuf.ServiceException {
14639            return (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto) channel.callBlockingMethod(
14640              getDescriptor().getMethods().get(2),
14641              controller,
14642              request,
14643              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto.getDefaultInstance());
14644          }
14645          
14646          
14647          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto format(
14648              com.google.protobuf.RpcController controller,
14649              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto request)
14650              throws com.google.protobuf.ServiceException {
14651            return (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto) channel.callBlockingMethod(
14652              getDescriptor().getMethods().get(3),
14653              controller,
14654              request,
14655              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto.getDefaultInstance());
14656          }
14657          
14658          
14659          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto journal(
14660              com.google.protobuf.RpcController controller,
14661              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto request)
14662              throws com.google.protobuf.ServiceException {
14663            return (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto) channel.callBlockingMethod(
14664              getDescriptor().getMethods().get(4),
14665              controller,
14666              request,
14667              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto.getDefaultInstance());
14668          }
14669          
14670          
14671          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto heartbeat(
14672              com.google.protobuf.RpcController controller,
14673              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto request)
14674              throws com.google.protobuf.ServiceException {
14675            return (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto) channel.callBlockingMethod(
14676              getDescriptor().getMethods().get(5),
14677              controller,
14678              request,
14679              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto.getDefaultInstance());
14680          }
14681          
14682          
14683          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto startLogSegment(
14684              com.google.protobuf.RpcController controller,
14685              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto request)
14686              throws com.google.protobuf.ServiceException {
14687            return (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto) channel.callBlockingMethod(
14688              getDescriptor().getMethods().get(6),
14689              controller,
14690              request,
14691              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto.getDefaultInstance());
14692          }
14693          
14694          
14695          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto finalizeLogSegment(
14696              com.google.protobuf.RpcController controller,
14697              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto request)
14698              throws com.google.protobuf.ServiceException {
14699            return (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto) channel.callBlockingMethod(
14700              getDescriptor().getMethods().get(7),
14701              controller,
14702              request,
14703              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto.getDefaultInstance());
14704          }
14705          
14706          
14707          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto purgeLogs(
14708              com.google.protobuf.RpcController controller,
14709              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto request)
14710              throws com.google.protobuf.ServiceException {
14711            return (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto) channel.callBlockingMethod(
14712              getDescriptor().getMethods().get(8),
14713              controller,
14714              request,
14715              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto.getDefaultInstance());
14716          }
14717          
14718          
14719          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto getEditLogManifest(
14720              com.google.protobuf.RpcController controller,
14721              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto request)
14722              throws com.google.protobuf.ServiceException {
14723            return (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto) channel.callBlockingMethod(
14724              getDescriptor().getMethods().get(9),
14725              controller,
14726              request,
14727              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto.getDefaultInstance());
14728          }
14729          
14730          
14731          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto prepareRecovery(
14732              com.google.protobuf.RpcController controller,
14733              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto request)
14734              throws com.google.protobuf.ServiceException {
14735            return (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto) channel.callBlockingMethod(
14736              getDescriptor().getMethods().get(10),
14737              controller,
14738              request,
14739              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto.getDefaultInstance());
14740          }
14741          
14742          
14743          public org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto acceptRecovery(
14744              com.google.protobuf.RpcController controller,
14745              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto request)
14746              throws com.google.protobuf.ServiceException {
14747            return (org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto) channel.callBlockingMethod(
14748              getDescriptor().getMethods().get(11),
14749              controller,
14750              request,
14751              org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto.getDefaultInstance());
14752          }
14753          
14754        }
14755      }
14756      
14757      private static com.google.protobuf.Descriptors.Descriptor
14758        internal_static_hadoop_hdfs_JournalIdProto_descriptor;
14759      private static
14760        com.google.protobuf.GeneratedMessage.FieldAccessorTable
14761          internal_static_hadoop_hdfs_JournalIdProto_fieldAccessorTable;
14762      private static com.google.protobuf.Descriptors.Descriptor
14763        internal_static_hadoop_hdfs_RequestInfoProto_descriptor;
14764      private static
14765        com.google.protobuf.GeneratedMessage.FieldAccessorTable
14766          internal_static_hadoop_hdfs_RequestInfoProto_fieldAccessorTable;
14767      private static com.google.protobuf.Descriptors.Descriptor
14768        internal_static_hadoop_hdfs_SegmentStateProto_descriptor;
14769      private static
14770        com.google.protobuf.GeneratedMessage.FieldAccessorTable
14771          internal_static_hadoop_hdfs_SegmentStateProto_fieldAccessorTable;
14772      private static com.google.protobuf.Descriptors.Descriptor
14773        internal_static_hadoop_hdfs_PersistedRecoveryPaxosData_descriptor;
14774      private static
14775        com.google.protobuf.GeneratedMessage.FieldAccessorTable
14776          internal_static_hadoop_hdfs_PersistedRecoveryPaxosData_fieldAccessorTable;
14777      private static com.google.protobuf.Descriptors.Descriptor
14778        internal_static_hadoop_hdfs_JournalRequestProto_descriptor;
14779      private static
14780        com.google.protobuf.GeneratedMessage.FieldAccessorTable
14781          internal_static_hadoop_hdfs_JournalRequestProto_fieldAccessorTable;
14782      private static com.google.protobuf.Descriptors.Descriptor
14783        internal_static_hadoop_hdfs_JournalResponseProto_descriptor;
14784      private static
14785        com.google.protobuf.GeneratedMessage.FieldAccessorTable
14786          internal_static_hadoop_hdfs_JournalResponseProto_fieldAccessorTable;
14787      private static com.google.protobuf.Descriptors.Descriptor
14788        internal_static_hadoop_hdfs_HeartbeatRequestProto_descriptor;
14789      private static
14790        com.google.protobuf.GeneratedMessage.FieldAccessorTable
14791          internal_static_hadoop_hdfs_HeartbeatRequestProto_fieldAccessorTable;
14792      private static com.google.protobuf.Descriptors.Descriptor
14793        internal_static_hadoop_hdfs_HeartbeatResponseProto_descriptor;
14794      private static
14795        com.google.protobuf.GeneratedMessage.FieldAccessorTable
14796          internal_static_hadoop_hdfs_HeartbeatResponseProto_fieldAccessorTable;
14797      private static com.google.protobuf.Descriptors.Descriptor
14798        internal_static_hadoop_hdfs_StartLogSegmentRequestProto_descriptor;
14799      private static
14800        com.google.protobuf.GeneratedMessage.FieldAccessorTable
14801          internal_static_hadoop_hdfs_StartLogSegmentRequestProto_fieldAccessorTable;
14802      private static com.google.protobuf.Descriptors.Descriptor
14803        internal_static_hadoop_hdfs_StartLogSegmentResponseProto_descriptor;
14804      private static
14805        com.google.protobuf.GeneratedMessage.FieldAccessorTable
14806          internal_static_hadoop_hdfs_StartLogSegmentResponseProto_fieldAccessorTable;
14807      private static com.google.protobuf.Descriptors.Descriptor
14808        internal_static_hadoop_hdfs_FinalizeLogSegmentRequestProto_descriptor;
14809      private static
14810        com.google.protobuf.GeneratedMessage.FieldAccessorTable
14811          internal_static_hadoop_hdfs_FinalizeLogSegmentRequestProto_fieldAccessorTable;
14812      private static com.google.protobuf.Descriptors.Descriptor
14813        internal_static_hadoop_hdfs_FinalizeLogSegmentResponseProto_descriptor;
14814      private static
14815        com.google.protobuf.GeneratedMessage.FieldAccessorTable
14816          internal_static_hadoop_hdfs_FinalizeLogSegmentResponseProto_fieldAccessorTable;
14817      private static com.google.protobuf.Descriptors.Descriptor
14818        internal_static_hadoop_hdfs_PurgeLogsRequestProto_descriptor;
14819      private static
14820        com.google.protobuf.GeneratedMessage.FieldAccessorTable
14821          internal_static_hadoop_hdfs_PurgeLogsRequestProto_fieldAccessorTable;
14822      private static com.google.protobuf.Descriptors.Descriptor
14823        internal_static_hadoop_hdfs_PurgeLogsResponseProto_descriptor;
14824      private static
14825        com.google.protobuf.GeneratedMessage.FieldAccessorTable
14826          internal_static_hadoop_hdfs_PurgeLogsResponseProto_fieldAccessorTable;
14827      private static com.google.protobuf.Descriptors.Descriptor
14828        internal_static_hadoop_hdfs_IsFormattedRequestProto_descriptor;
14829      private static
14830        com.google.protobuf.GeneratedMessage.FieldAccessorTable
14831          internal_static_hadoop_hdfs_IsFormattedRequestProto_fieldAccessorTable;
14832      private static com.google.protobuf.Descriptors.Descriptor
14833        internal_static_hadoop_hdfs_IsFormattedResponseProto_descriptor;
14834      private static
14835        com.google.protobuf.GeneratedMessage.FieldAccessorTable
14836          internal_static_hadoop_hdfs_IsFormattedResponseProto_fieldAccessorTable;
14837      private static com.google.protobuf.Descriptors.Descriptor
14838        internal_static_hadoop_hdfs_GetJournalStateRequestProto_descriptor;
14839      private static
14840        com.google.protobuf.GeneratedMessage.FieldAccessorTable
14841          internal_static_hadoop_hdfs_GetJournalStateRequestProto_fieldAccessorTable;
14842      private static com.google.protobuf.Descriptors.Descriptor
14843        internal_static_hadoop_hdfs_GetJournalStateResponseProto_descriptor;
14844      private static
14845        com.google.protobuf.GeneratedMessage.FieldAccessorTable
14846          internal_static_hadoop_hdfs_GetJournalStateResponseProto_fieldAccessorTable;
14847      private static com.google.protobuf.Descriptors.Descriptor
14848        internal_static_hadoop_hdfs_FormatRequestProto_descriptor;
14849      private static
14850        com.google.protobuf.GeneratedMessage.FieldAccessorTable
14851          internal_static_hadoop_hdfs_FormatRequestProto_fieldAccessorTable;
14852      private static com.google.protobuf.Descriptors.Descriptor
14853        internal_static_hadoop_hdfs_FormatResponseProto_descriptor;
14854      private static
14855        com.google.protobuf.GeneratedMessage.FieldAccessorTable
14856          internal_static_hadoop_hdfs_FormatResponseProto_fieldAccessorTable;
14857      private static com.google.protobuf.Descriptors.Descriptor
14858        internal_static_hadoop_hdfs_NewEpochRequestProto_descriptor;
14859      private static
14860        com.google.protobuf.GeneratedMessage.FieldAccessorTable
14861          internal_static_hadoop_hdfs_NewEpochRequestProto_fieldAccessorTable;
14862      private static com.google.protobuf.Descriptors.Descriptor
14863        internal_static_hadoop_hdfs_NewEpochResponseProto_descriptor;
14864      private static
14865        com.google.protobuf.GeneratedMessage.FieldAccessorTable
14866          internal_static_hadoop_hdfs_NewEpochResponseProto_fieldAccessorTable;
14867      private static com.google.protobuf.Descriptors.Descriptor
14868        internal_static_hadoop_hdfs_GetEditLogManifestRequestProto_descriptor;
14869      private static
14870        com.google.protobuf.GeneratedMessage.FieldAccessorTable
14871          internal_static_hadoop_hdfs_GetEditLogManifestRequestProto_fieldAccessorTable;
14872      private static com.google.protobuf.Descriptors.Descriptor
14873        internal_static_hadoop_hdfs_GetEditLogManifestResponseProto_descriptor;
14874      private static
14875        com.google.protobuf.GeneratedMessage.FieldAccessorTable
14876          internal_static_hadoop_hdfs_GetEditLogManifestResponseProto_fieldAccessorTable;
14877      private static com.google.protobuf.Descriptors.Descriptor
14878        internal_static_hadoop_hdfs_PrepareRecoveryRequestProto_descriptor;
14879      private static
14880        com.google.protobuf.GeneratedMessage.FieldAccessorTable
14881          internal_static_hadoop_hdfs_PrepareRecoveryRequestProto_fieldAccessorTable;
14882      private static com.google.protobuf.Descriptors.Descriptor
14883        internal_static_hadoop_hdfs_PrepareRecoveryResponseProto_descriptor;
14884      private static
14885        com.google.protobuf.GeneratedMessage.FieldAccessorTable
14886          internal_static_hadoop_hdfs_PrepareRecoveryResponseProto_fieldAccessorTable;
14887      private static com.google.protobuf.Descriptors.Descriptor
14888        internal_static_hadoop_hdfs_AcceptRecoveryRequestProto_descriptor;
14889      private static
14890        com.google.protobuf.GeneratedMessage.FieldAccessorTable
14891          internal_static_hadoop_hdfs_AcceptRecoveryRequestProto_fieldAccessorTable;
14892      private static com.google.protobuf.Descriptors.Descriptor
14893        internal_static_hadoop_hdfs_AcceptRecoveryResponseProto_descriptor;
14894      private static
14895        com.google.protobuf.GeneratedMessage.FieldAccessorTable
14896          internal_static_hadoop_hdfs_AcceptRecoveryResponseProto_fieldAccessorTable;
14897      
14898      public static com.google.protobuf.Descriptors.FileDescriptor
14899          getDescriptor() {
14900        return descriptor;
14901      }
14902      private static com.google.protobuf.Descriptors.FileDescriptor
14903          descriptor;
14904      static {
14905        java.lang.String[] descriptorData = {
14906          "\n\026QJournalProtocol.proto\022\013hadoop.hdfs\032\nh" +
14907          "dfs.proto\"$\n\016JournalIdProto\022\022\n\nidentifie" +
14908          "r\030\001 \002(\t\"\201\001\n\020RequestInfoProto\022.\n\tjournalI" +
14909          "d\030\001 \002(\0132\033.hadoop.hdfs.JournalIdProto\022\r\n\005" +
14910          "epoch\030\002 \002(\004\022\027\n\017ipcSerialNumber\030\003 \002(\004\022\025\n\r" +
14911          "committedTxId\030\004 \001(\004\"M\n\021SegmentStateProto" +
14912          "\022\021\n\tstartTxId\030\001 \002(\004\022\017\n\007endTxId\030\002 \002(\004\022\024\n\014" +
14913          "isInProgress\030\003 \002(\010\"k\n\032PersistedRecoveryP" +
14914          "axosData\0224\n\014segmentState\030\001 \002(\0132\036.hadoop." +
14915          "hdfs.SegmentStateProto\022\027\n\017acceptedInEpoc",
14916          "h\030\002 \002(\004\"\221\001\n\023JournalRequestProto\022.\n\007reqIn" +
14917          "fo\030\001 \002(\0132\035.hadoop.hdfs.RequestInfoProto\022" +
14918          "\022\n\nfirstTxnId\030\002 \002(\004\022\017\n\007numTxns\030\003 \002(\r\022\017\n\007" +
14919          "records\030\004 \002(\014\022\024\n\014segmentTxnId\030\005 \002(\004\"\026\n\024J" +
14920          "ournalResponseProto\"G\n\025HeartbeatRequestP" +
14921          "roto\022.\n\007reqInfo\030\001 \002(\0132\035.hadoop.hdfs.Requ" +
14922          "estInfoProto\"\030\n\026HeartbeatResponseProto\"[" +
14923          "\n\033StartLogSegmentRequestProto\022.\n\007reqInfo" +
14924          "\030\001 \002(\0132\035.hadoop.hdfs.RequestInfoProto\022\014\n" +
14925          "\004txid\030\002 \002(\004\"\036\n\034StartLogSegmentResponsePr",
14926          "oto\"t\n\036FinalizeLogSegmentRequestProto\022.\n" +
14927          "\007reqInfo\030\001 \002(\0132\035.hadoop.hdfs.RequestInfo" +
14928          "Proto\022\021\n\tstartTxId\030\002 \002(\004\022\017\n\007endTxId\030\003 \002(" +
14929          "\004\"!\n\037FinalizeLogSegmentResponseProto\"^\n\025" +
14930          "PurgeLogsRequestProto\022.\n\007reqInfo\030\001 \002(\0132\035" +
14931          ".hadoop.hdfs.RequestInfoProto\022\025\n\rminTxId" +
14932          "ToKeep\030\002 \002(\004\"\030\n\026PurgeLogsResponseProto\"C" +
14933          "\n\027IsFormattedRequestProto\022(\n\003jid\030\001 \002(\0132\033" +
14934          ".hadoop.hdfs.JournalIdProto\"/\n\030IsFormatt" +
14935          "edResponseProto\022\023\n\013isFormatted\030\001 \002(\010\"G\n\033",
14936          "GetJournalStateRequestProto\022(\n\003jid\030\001 \002(\013" +
14937          "2\033.hadoop.hdfs.JournalIdProto\"K\n\034GetJour" +
14938          "nalStateResponseProto\022\031\n\021lastPromisedEpo" +
14939          "ch\030\001 \002(\004\022\020\n\010httpPort\030\002 \002(\r\"o\n\022FormatRequ" +
14940          "estProto\022(\n\003jid\030\001 \002(\0132\033.hadoop.hdfs.Jour" +
14941          "nalIdProto\022/\n\006nsInfo\030\002 \002(\0132\037.hadoop.hdfs" +
14942          ".NamespaceInfoProto\"\025\n\023FormatResponsePro" +
14943          "to\"\200\001\n\024NewEpochRequestProto\022(\n\003jid\030\001 \002(\013" +
14944          "2\033.hadoop.hdfs.JournalIdProto\022/\n\006nsInfo\030" +
14945          "\002 \002(\0132\037.hadoop.hdfs.NamespaceInfoProto\022\r",
14946          "\n\005epoch\030\003 \002(\004\"0\n\025NewEpochResponseProto\022\027" +
14947          "\n\017lastSegmentTxId\030\001 \001(\004\"]\n\036GetEditLogMan" +
14948          "ifestRequestProto\022(\n\003jid\030\001 \002(\0132\033.hadoop." +
14949          "hdfs.JournalIdProto\022\021\n\tsinceTxId\030\002 \002(\004\"n" +
14950          "\n\037GetEditLogManifestResponseProto\0229\n\010man" +
14951          "ifest\030\001 \002(\0132\'.hadoop.hdfs.RemoteEditLogM" +
14952          "anifestProto\022\020\n\010httpPort\030\002 \002(\r\"b\n\033Prepar" +
14953          "eRecoveryRequestProto\022.\n\007reqInfo\030\001 \002(\0132\035" +
14954          ".hadoop.hdfs.RequestInfoProto\022\023\n\013segment" +
14955          "TxId\030\002 \002(\004\"\241\001\n\034PrepareRecoveryResponsePr",
14956          "oto\0224\n\014segmentState\030\001 \001(\0132\036.hadoop.hdfs." +
14957          "SegmentStateProto\022\027\n\017acceptedInEpoch\030\002 \001" +
14958          "(\004\022\027\n\017lastWriterEpoch\030\003 \002(\004\022\031\n\021lastCommi" +
14959          "ttedTxId\030\004 \001(\004\"\224\001\n\032AcceptRecoveryRequest" +
14960          "Proto\022.\n\007reqInfo\030\001 \002(\0132\035.hadoop.hdfs.Req" +
14961          "uestInfoProto\0225\n\rstateToAccept\030\002 \002(\0132\036.h" +
14962          "adoop.hdfs.SegmentStateProto\022\017\n\007fromURL\030" +
14963          "\003 \002(\t\"\035\n\033AcceptRecoveryResponseProto2\220\t\n" +
14964          "\027QJournalProtocolService\022Z\n\013isFormatted\022" +
14965          "$.hadoop.hdfs.IsFormattedRequestProto\032%.",
14966          "hadoop.hdfs.IsFormattedResponseProto\022f\n\017" +
14967          "getJournalState\022(.hadoop.hdfs.GetJournal" +
14968          "StateRequestProto\032).hadoop.hdfs.GetJourn" +
14969          "alStateResponseProto\022Q\n\010newEpoch\022!.hadoo" +
14970          "p.hdfs.NewEpochRequestProto\032\".hadoop.hdf" +
14971          "s.NewEpochResponseProto\022K\n\006format\022\037.hado" +
14972          "op.hdfs.FormatRequestProto\032 .hadoop.hdfs" +
14973          ".FormatResponseProto\022N\n\007journal\022 .hadoop" +
14974          ".hdfs.JournalRequestProto\032!.hadoop.hdfs." +
14975          "JournalResponseProto\022T\n\theartbeat\022\".hado",
14976          "op.hdfs.HeartbeatRequestProto\032#.hadoop.h" +
14977          "dfs.HeartbeatResponseProto\022f\n\017startLogSe" +
14978          "gment\022(.hadoop.hdfs.StartLogSegmentReque" +
14979          "stProto\032).hadoop.hdfs.StartLogSegmentRes" +
14980          "ponseProto\022o\n\022finalizeLogSegment\022+.hadoo" +
14981          "p.hdfs.FinalizeLogSegmentRequestProto\032,." +
14982          "hadoop.hdfs.FinalizeLogSegmentResponsePr" +
14983          "oto\022T\n\tpurgeLogs\022\".hadoop.hdfs.PurgeLogs" +
14984          "RequestProto\032#.hadoop.hdfs.PurgeLogsResp" +
14985          "onseProto\022o\n\022getEditLogManifest\022+.hadoop",
14986          ".hdfs.GetEditLogManifestRequestProto\032,.h" +
14987          "adoop.hdfs.GetEditLogManifestResponsePro" +
14988          "to\022f\n\017prepareRecovery\022(.hadoop.hdfs.Prep" +
14989          "areRecoveryRequestProto\032).hadoop.hdfs.Pr" +
14990          "epareRecoveryResponseProto\022c\n\016acceptReco" +
14991          "very\022\'.hadoop.hdfs.AcceptRecoveryRequest" +
14992          "Proto\032(.hadoop.hdfs.AcceptRecoveryRespon" +
14993          "seProtoBH\n(org.apache.hadoop.hdfs.qjourn" +
14994          "al.protocolB\026QJournalProtocolProtos\210\001\001\240\001" +
14995          "\001"
14996        };
14997        com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
14998          new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
14999            public com.google.protobuf.ExtensionRegistry assignDescriptors(
15000                com.google.protobuf.Descriptors.FileDescriptor root) {
15001              descriptor = root;
15002              internal_static_hadoop_hdfs_JournalIdProto_descriptor =
15003                getDescriptor().getMessageTypes().get(0);
15004              internal_static_hadoop_hdfs_JournalIdProto_fieldAccessorTable = new
15005                com.google.protobuf.GeneratedMessage.FieldAccessorTable(
15006                  internal_static_hadoop_hdfs_JournalIdProto_descriptor,
15007                  new java.lang.String[] { "Identifier", },
15008                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.class,
15009                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalIdProto.Builder.class);
15010              internal_static_hadoop_hdfs_RequestInfoProto_descriptor =
15011                getDescriptor().getMessageTypes().get(1);
15012              internal_static_hadoop_hdfs_RequestInfoProto_fieldAccessorTable = new
15013                com.google.protobuf.GeneratedMessage.FieldAccessorTable(
15014                  internal_static_hadoop_hdfs_RequestInfoProto_descriptor,
15015                  new java.lang.String[] { "JournalId", "Epoch", "IpcSerialNumber", "CommittedTxId", },
15016                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.class,
15017                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.RequestInfoProto.Builder.class);
15018              internal_static_hadoop_hdfs_SegmentStateProto_descriptor =
15019                getDescriptor().getMessageTypes().get(2);
15020              internal_static_hadoop_hdfs_SegmentStateProto_fieldAccessorTable = new
15021                com.google.protobuf.GeneratedMessage.FieldAccessorTable(
15022                  internal_static_hadoop_hdfs_SegmentStateProto_descriptor,
15023                  new java.lang.String[] { "StartTxId", "EndTxId", "IsInProgress", },
15024                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.class,
15025                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.SegmentStateProto.Builder.class);
15026              internal_static_hadoop_hdfs_PersistedRecoveryPaxosData_descriptor =
15027                getDescriptor().getMessageTypes().get(3);
15028              internal_static_hadoop_hdfs_PersistedRecoveryPaxosData_fieldAccessorTable = new
15029                com.google.protobuf.GeneratedMessage.FieldAccessorTable(
15030                  internal_static_hadoop_hdfs_PersistedRecoveryPaxosData_descriptor,
15031                  new java.lang.String[] { "SegmentState", "AcceptedInEpoch", },
15032                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData.class,
15033                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData.Builder.class);
15034              internal_static_hadoop_hdfs_JournalRequestProto_descriptor =
15035                getDescriptor().getMessageTypes().get(4);
15036              internal_static_hadoop_hdfs_JournalRequestProto_fieldAccessorTable = new
15037                com.google.protobuf.GeneratedMessage.FieldAccessorTable(
15038                  internal_static_hadoop_hdfs_JournalRequestProto_descriptor,
15039                  new java.lang.String[] { "ReqInfo", "FirstTxnId", "NumTxns", "Records", "SegmentTxnId", },
15040                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto.class,
15041                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalRequestProto.Builder.class);
15042              internal_static_hadoop_hdfs_JournalResponseProto_descriptor =
15043                getDescriptor().getMessageTypes().get(5);
15044              internal_static_hadoop_hdfs_JournalResponseProto_fieldAccessorTable = new
15045                com.google.protobuf.GeneratedMessage.FieldAccessorTable(
15046                  internal_static_hadoop_hdfs_JournalResponseProto_descriptor,
15047                  new java.lang.String[] { },
15048                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto.class,
15049                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.JournalResponseProto.Builder.class);
15050              internal_static_hadoop_hdfs_HeartbeatRequestProto_descriptor =
15051                getDescriptor().getMessageTypes().get(6);
15052              internal_static_hadoop_hdfs_HeartbeatRequestProto_fieldAccessorTable = new
15053                com.google.protobuf.GeneratedMessage.FieldAccessorTable(
15054                  internal_static_hadoop_hdfs_HeartbeatRequestProto_descriptor,
15055                  new java.lang.String[] { "ReqInfo", },
15056                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto.class,
15057                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatRequestProto.Builder.class);
15058              internal_static_hadoop_hdfs_HeartbeatResponseProto_descriptor =
15059                getDescriptor().getMessageTypes().get(7);
15060              internal_static_hadoop_hdfs_HeartbeatResponseProto_fieldAccessorTable = new
15061                com.google.protobuf.GeneratedMessage.FieldAccessorTable(
15062                  internal_static_hadoop_hdfs_HeartbeatResponseProto_descriptor,
15063                  new java.lang.String[] { },
15064                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto.class,
15065                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.HeartbeatResponseProto.Builder.class);
15066              internal_static_hadoop_hdfs_StartLogSegmentRequestProto_descriptor =
15067                getDescriptor().getMessageTypes().get(8);
15068              internal_static_hadoop_hdfs_StartLogSegmentRequestProto_fieldAccessorTable = new
15069                com.google.protobuf.GeneratedMessage.FieldAccessorTable(
15070                  internal_static_hadoop_hdfs_StartLogSegmentRequestProto_descriptor,
15071                  new java.lang.String[] { "ReqInfo", "Txid", },
15072                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto.class,
15073                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentRequestProto.Builder.class);
15074              internal_static_hadoop_hdfs_StartLogSegmentResponseProto_descriptor =
15075                getDescriptor().getMessageTypes().get(9);
15076              internal_static_hadoop_hdfs_StartLogSegmentResponseProto_fieldAccessorTable = new
15077                com.google.protobuf.GeneratedMessage.FieldAccessorTable(
15078                  internal_static_hadoop_hdfs_StartLogSegmentResponseProto_descriptor,
15079                  new java.lang.String[] { },
15080                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto.class,
15081                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.StartLogSegmentResponseProto.Builder.class);
15082              internal_static_hadoop_hdfs_FinalizeLogSegmentRequestProto_descriptor =
15083                getDescriptor().getMessageTypes().get(10);
15084              internal_static_hadoop_hdfs_FinalizeLogSegmentRequestProto_fieldAccessorTable = new
15085                com.google.protobuf.GeneratedMessage.FieldAccessorTable(
15086                  internal_static_hadoop_hdfs_FinalizeLogSegmentRequestProto_descriptor,
15087                  new java.lang.String[] { "ReqInfo", "StartTxId", "EndTxId", },
15088                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto.class,
15089                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentRequestProto.Builder.class);
15090              internal_static_hadoop_hdfs_FinalizeLogSegmentResponseProto_descriptor =
15091                getDescriptor().getMessageTypes().get(11);
15092              internal_static_hadoop_hdfs_FinalizeLogSegmentResponseProto_fieldAccessorTable = new
15093                com.google.protobuf.GeneratedMessage.FieldAccessorTable(
15094                  internal_static_hadoop_hdfs_FinalizeLogSegmentResponseProto_descriptor,
15095                  new java.lang.String[] { },
15096                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto.class,
15097                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FinalizeLogSegmentResponseProto.Builder.class);
15098              internal_static_hadoop_hdfs_PurgeLogsRequestProto_descriptor =
15099                getDescriptor().getMessageTypes().get(12);
15100              internal_static_hadoop_hdfs_PurgeLogsRequestProto_fieldAccessorTable = new
15101                com.google.protobuf.GeneratedMessage.FieldAccessorTable(
15102                  internal_static_hadoop_hdfs_PurgeLogsRequestProto_descriptor,
15103                  new java.lang.String[] { "ReqInfo", "MinTxIdToKeep", },
15104                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto.class,
15105                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsRequestProto.Builder.class);
15106              internal_static_hadoop_hdfs_PurgeLogsResponseProto_descriptor =
15107                getDescriptor().getMessageTypes().get(13);
15108              internal_static_hadoop_hdfs_PurgeLogsResponseProto_fieldAccessorTable = new
15109                com.google.protobuf.GeneratedMessage.FieldAccessorTable(
15110                  internal_static_hadoop_hdfs_PurgeLogsResponseProto_descriptor,
15111                  new java.lang.String[] { },
15112                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto.class,
15113                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PurgeLogsResponseProto.Builder.class);
15114              internal_static_hadoop_hdfs_IsFormattedRequestProto_descriptor =
15115                getDescriptor().getMessageTypes().get(14);
15116              internal_static_hadoop_hdfs_IsFormattedRequestProto_fieldAccessorTable = new
15117                com.google.protobuf.GeneratedMessage.FieldAccessorTable(
15118                  internal_static_hadoop_hdfs_IsFormattedRequestProto_descriptor,
15119                  new java.lang.String[] { "Jid", },
15120                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto.class,
15121                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedRequestProto.Builder.class);
15122              internal_static_hadoop_hdfs_IsFormattedResponseProto_descriptor =
15123                getDescriptor().getMessageTypes().get(15);
15124              internal_static_hadoop_hdfs_IsFormattedResponseProto_fieldAccessorTable = new
15125                com.google.protobuf.GeneratedMessage.FieldAccessorTable(
15126                  internal_static_hadoop_hdfs_IsFormattedResponseProto_descriptor,
15127                  new java.lang.String[] { "IsFormatted", },
15128                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto.class,
15129                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.IsFormattedResponseProto.Builder.class);
15130              internal_static_hadoop_hdfs_GetJournalStateRequestProto_descriptor =
15131                getDescriptor().getMessageTypes().get(16);
15132              internal_static_hadoop_hdfs_GetJournalStateRequestProto_fieldAccessorTable = new
15133                com.google.protobuf.GeneratedMessage.FieldAccessorTable(
15134                  internal_static_hadoop_hdfs_GetJournalStateRequestProto_descriptor,
15135                  new java.lang.String[] { "Jid", },
15136                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto.class,
15137                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateRequestProto.Builder.class);
15138              internal_static_hadoop_hdfs_GetJournalStateResponseProto_descriptor =
15139                getDescriptor().getMessageTypes().get(17);
15140              internal_static_hadoop_hdfs_GetJournalStateResponseProto_fieldAccessorTable = new
15141                com.google.protobuf.GeneratedMessage.FieldAccessorTable(
15142                  internal_static_hadoop_hdfs_GetJournalStateResponseProto_descriptor,
15143                  new java.lang.String[] { "LastPromisedEpoch", "HttpPort", },
15144                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto.class,
15145                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetJournalStateResponseProto.Builder.class);
15146              internal_static_hadoop_hdfs_FormatRequestProto_descriptor =
15147                getDescriptor().getMessageTypes().get(18);
15148              internal_static_hadoop_hdfs_FormatRequestProto_fieldAccessorTable = new
15149                com.google.protobuf.GeneratedMessage.FieldAccessorTable(
15150                  internal_static_hadoop_hdfs_FormatRequestProto_descriptor,
15151                  new java.lang.String[] { "Jid", "NsInfo", },
15152                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto.class,
15153                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatRequestProto.Builder.class);
15154              internal_static_hadoop_hdfs_FormatResponseProto_descriptor =
15155                getDescriptor().getMessageTypes().get(19);
15156              internal_static_hadoop_hdfs_FormatResponseProto_fieldAccessorTable = new
15157                com.google.protobuf.GeneratedMessage.FieldAccessorTable(
15158                  internal_static_hadoop_hdfs_FormatResponseProto_descriptor,
15159                  new java.lang.String[] { },
15160                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto.class,
15161                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.FormatResponseProto.Builder.class);
15162              internal_static_hadoop_hdfs_NewEpochRequestProto_descriptor =
15163                getDescriptor().getMessageTypes().get(20);
15164              internal_static_hadoop_hdfs_NewEpochRequestProto_fieldAccessorTable = new
15165                com.google.protobuf.GeneratedMessage.FieldAccessorTable(
15166                  internal_static_hadoop_hdfs_NewEpochRequestProto_descriptor,
15167                  new java.lang.String[] { "Jid", "NsInfo", "Epoch", },
15168                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto.class,
15169                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochRequestProto.Builder.class);
15170              internal_static_hadoop_hdfs_NewEpochResponseProto_descriptor =
15171                getDescriptor().getMessageTypes().get(21);
15172              internal_static_hadoop_hdfs_NewEpochResponseProto_fieldAccessorTable = new
15173                com.google.protobuf.GeneratedMessage.FieldAccessorTable(
15174                  internal_static_hadoop_hdfs_NewEpochResponseProto_descriptor,
15175                  new java.lang.String[] { "LastSegmentTxId", },
15176                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto.class,
15177                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto.Builder.class);
15178              internal_static_hadoop_hdfs_GetEditLogManifestRequestProto_descriptor =
15179                getDescriptor().getMessageTypes().get(22);
15180              internal_static_hadoop_hdfs_GetEditLogManifestRequestProto_fieldAccessorTable = new
15181                com.google.protobuf.GeneratedMessage.FieldAccessorTable(
15182                  internal_static_hadoop_hdfs_GetEditLogManifestRequestProto_descriptor,
15183                  new java.lang.String[] { "Jid", "SinceTxId", },
15184                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto.class,
15185                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestRequestProto.Builder.class);
15186              internal_static_hadoop_hdfs_GetEditLogManifestResponseProto_descriptor =
15187                getDescriptor().getMessageTypes().get(23);
15188              internal_static_hadoop_hdfs_GetEditLogManifestResponseProto_fieldAccessorTable = new
15189                com.google.protobuf.GeneratedMessage.FieldAccessorTable(
15190                  internal_static_hadoop_hdfs_GetEditLogManifestResponseProto_descriptor,
15191                  new java.lang.String[] { "Manifest", "HttpPort", },
15192                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto.class,
15193                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.GetEditLogManifestResponseProto.Builder.class);
15194              internal_static_hadoop_hdfs_PrepareRecoveryRequestProto_descriptor =
15195                getDescriptor().getMessageTypes().get(24);
15196              internal_static_hadoop_hdfs_PrepareRecoveryRequestProto_fieldAccessorTable = new
15197                com.google.protobuf.GeneratedMessage.FieldAccessorTable(
15198                  internal_static_hadoop_hdfs_PrepareRecoveryRequestProto_descriptor,
15199                  new java.lang.String[] { "ReqInfo", "SegmentTxId", },
15200                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto.class,
15201                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryRequestProto.Builder.class);
15202              internal_static_hadoop_hdfs_PrepareRecoveryResponseProto_descriptor =
15203                getDescriptor().getMessageTypes().get(25);
15204              internal_static_hadoop_hdfs_PrepareRecoveryResponseProto_fieldAccessorTable = new
15205                com.google.protobuf.GeneratedMessage.FieldAccessorTable(
15206                  internal_static_hadoop_hdfs_PrepareRecoveryResponseProto_descriptor,
15207                  new java.lang.String[] { "SegmentState", "AcceptedInEpoch", "LastWriterEpoch", "LastCommittedTxId", },
15208                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto.class,
15209                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto.Builder.class);
15210              internal_static_hadoop_hdfs_AcceptRecoveryRequestProto_descriptor =
15211                getDescriptor().getMessageTypes().get(26);
15212              internal_static_hadoop_hdfs_AcceptRecoveryRequestProto_fieldAccessorTable = new
15213                com.google.protobuf.GeneratedMessage.FieldAccessorTable(
15214                  internal_static_hadoop_hdfs_AcceptRecoveryRequestProto_descriptor,
15215                  new java.lang.String[] { "ReqInfo", "StateToAccept", "FromURL", },
15216                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto.class,
15217                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryRequestProto.Builder.class);
15218              internal_static_hadoop_hdfs_AcceptRecoveryResponseProto_descriptor =
15219                getDescriptor().getMessageTypes().get(27);
15220              internal_static_hadoop_hdfs_AcceptRecoveryResponseProto_fieldAccessorTable = new
15221                com.google.protobuf.GeneratedMessage.FieldAccessorTable(
15222                  internal_static_hadoop_hdfs_AcceptRecoveryResponseProto_descriptor,
15223                  new java.lang.String[] { },
15224                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto.class,
15225                  org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.AcceptRecoveryResponseProto.Builder.class);
15226              return null;
15227            }
15228          };
15229        com.google.protobuf.Descriptors.FileDescriptor
15230          .internalBuildGeneratedFileFrom(descriptorData,
15231            new com.google.protobuf.Descriptors.FileDescriptor[] {
15232              org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.getDescriptor(),
15233            }, assigner);
15234      }
15235      
15236      // @@protoc_insertion_point(outer_class_scope)
15237    }