/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hdfs.server.common;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdfs.server.namenode.MetaRecoveryContext;

import com.google.common.base.Preconditions;
/************************************
 * Some handy internal HDFS constants
 *
 ************************************/

@InterfaceAudience.Private
public final class HdfsServerConstants {
  /* Hidden constructor */
  private HdfsServerConstants() { }

  /**
   * Type of the node.
   */
  static public enum NodeType {
    NAME_NODE,
    DATA_NODE,
    JOURNAL_NODE;
  }
  /** Startup options for rolling upgrade. */
  public static enum RollingUpgradeStartupOption {
    ROLLBACK, DOWNGRADE, STARTED;

    public String getOptionString() {
      return StartupOption.ROLLINGUPGRADE.getName() + " "
          + name().toLowerCase();
    }

    public boolean matches(StartupOption option) {
      return option == StartupOption.ROLLINGUPGRADE
          && option.getRollingUpgradeStartupOption() == this;
    }

    private static final RollingUpgradeStartupOption[] VALUES = values();

    static RollingUpgradeStartupOption fromString(String s) {
      for(RollingUpgradeStartupOption opt : VALUES) {
        if (opt.name().equalsIgnoreCase(s)) {
          return opt;
        }
      }
      throw new IllegalArgumentException("Failed to convert \"" + s
          + "\" to " + RollingUpgradeStartupOption.class.getSimpleName());
    }

    public static String getAllOptionString() {
      final StringBuilder b = new StringBuilder("<");
      for(RollingUpgradeStartupOption opt : VALUES) {
        b.append(opt.name().toLowerCase()).append("|");
      }
      b.setCharAt(b.length() - 1, '>');
      return b.toString();
    }
  }
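
  // Illustrative note (not part of the original class): the rolling upgrade
  // option round-trips through its string forms, given the values above:
  //   RollingUpgradeStartupOption.ROLLBACK.getOptionString()
  //       -> "-rollingUpgrade rollback"
  //   RollingUpgradeStartupOption.fromString("RollBack")  -> ROLLBACK  (case-insensitive)
  //   RollingUpgradeStartupOption.getAllOptionString()    -> "<rollback|downgrade|started>"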

  /** Startup options */
  static public enum StartupOption {
    FORMAT  ("-format"),
    CLUSTERID ("-clusterid"),
    GENCLUSTERID ("-genclusterid"),
    REGULAR ("-regular"),
    BACKUP  ("-backup"),
    CHECKPOINT("-checkpoint"),
    UPGRADE ("-upgrade"),
    ROLLBACK("-rollback"),
    FINALIZE("-finalize"),
    ROLLINGUPGRADE("-rollingUpgrade"),
    IMPORT  ("-importCheckpoint"),
    BOOTSTRAPSTANDBY("-bootstrapStandby"),
    INITIALIZESHAREDEDITS("-initializeSharedEdits"),
    RECOVER  ("-recover"),
    FORCE("-force"),
    NONINTERACTIVE("-nonInteractive"),
    RENAMERESERVED("-renameReserved"),
    METADATAVERSION("-metadataVersion"),
    UPGRADEONLY("-upgradeOnly"),
    // The -hotswap constant should not be used as a startup option; it is
    // used only by StorageDirectory.analyzeStorage() in the hot swap drive
    // scenario.
    // TODO: refactor StorageDirectory.analyzeStorage() so that we can do away
    // with this in StartupOption.
    HOTSWAP("-hotswap");

    private static final Pattern ENUM_WITH_ROLLING_UPGRADE_OPTION = Pattern.compile(
        "(\\w+)\\((\\w+)\\)");

    private final String name;

    // Used only with format and upgrade options
    private String clusterId = null;

    // Used only by rolling upgrade
    private RollingUpgradeStartupOption rollingUpgradeStartupOption;

    // Used only with format option
    private boolean isForceFormat = false;
    private boolean isInteractiveFormat = true;

    // Used only with recovery option
    private int force = 0;

    private StartupOption(String arg) {this.name = arg;}
    public String getName() {return name;}
    public NamenodeRole toNodeRole() {
      switch(this) {
      case BACKUP:
        return NamenodeRole.BACKUP;
      case CHECKPOINT:
        return NamenodeRole.CHECKPOINT;
      default:
        return NamenodeRole.NAMENODE;
      }
    }

    public void setClusterId(String cid) {
      clusterId = cid;
    }

    public String getClusterId() {
      return clusterId;
    }

    public void setRollingUpgradeStartupOption(String opt) {
      Preconditions.checkState(this == ROLLINGUPGRADE);
      rollingUpgradeStartupOption = RollingUpgradeStartupOption.fromString(opt);
    }

    public RollingUpgradeStartupOption getRollingUpgradeStartupOption() {
      Preconditions.checkState(this == ROLLINGUPGRADE);
      return rollingUpgradeStartupOption;
    }

    public MetaRecoveryContext createRecoveryContext() {
      if (!name.equals(RECOVER.name)) {
        return null;
      }
      return new MetaRecoveryContext(force);
    }

    public void setForce(int force) {
      this.force = force;
    }

    public int getForce() {
      return this.force;
    }

    public boolean getForceFormat() {
      return isForceFormat;
    }

    public void setForceFormat(boolean force) {
      isForceFormat = force;
    }

    public boolean getInteractiveFormat() {
      return isInteractiveFormat;
    }

    public void setInteractiveFormat(boolean interactive) {
      isInteractiveFormat = interactive;
    }

    @Override
    public String toString() {
      if (this == ROLLINGUPGRADE) {
        return new StringBuilder(super.toString())
            .append("(").append(getRollingUpgradeStartupOption()).append(")")
            .toString();
      }
      return super.toString();
    }

    static public StartupOption getEnum(String value) {
      Matcher matcher = ENUM_WITH_ROLLING_UPGRADE_OPTION.matcher(value);
      if (matcher.matches()) {
        StartupOption option = StartupOption.valueOf(matcher.group(1));
        option.setRollingUpgradeStartupOption(matcher.group(2));
        return option;
      } else {
        return StartupOption.valueOf(value);
      }
    }
  }
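
  // Illustrative note (not part of the original class): getEnum() accepts both the
  // plain enum name and the "NAME(SUBOPTION)" form produced by toString(), e.g.
  //   StartupOption.getEnum("FORMAT")                   -> FORMAT
  //   StartupOption.getEnum("ROLLINGUPGRADE(ROLLBACK)") -> ROLLINGUPGRADE with its
  //       rolling upgrade sub-option set to ROLLBACK, so toString() prints
  //       "ROLLINGUPGRADE(ROLLBACK)" again.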

  // Timeouts (in milliseconds) for communicating with DataNodes during
  // streaming writes and reads.
  public static final int READ_TIMEOUT = 60 * 1000;
  public static final int READ_TIMEOUT_EXTENSION = 5 * 1000;
  public static final int WRITE_TIMEOUT = 8 * 60 * 1000;
  public static final int WRITE_TIMEOUT_EXTENSION = 5 * 1000; //for write pipeline
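
  // Illustrative note (an assumption about typical callers, not stated in this file):
  // the *_EXTENSION values are scaled by the number of nodes in the write pipeline,
  // giving an effective timeout of roughly
  //   WRITE_TIMEOUT + WRITE_TIMEOUT_EXTENSION * numberOfPipelineNodes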

  /**
   * Defines the NameNode role.
   */
  static public enum NamenodeRole {
    NAMENODE  ("NameNode"),
    BACKUP    ("Backup Node"),
    CHECKPOINT("Checkpoint Node");

    private String description = null;
    private NamenodeRole(String arg) {this.description = arg;}

    @Override
    public String toString() {
      return description;
    }
  }
  /**
   * Block replica states, which a replica can go through while it is being
   * constructed.
   */
  static public enum ReplicaState {
    /** Replica is finalized. The state when replica is not modified. */
    FINALIZED(0),
    /** Replica is being written to. */
    RBW(1),
    /** Replica is waiting to be recovered. */
    RWR(2),
    /** Replica is under recovery. */
    RUR(3),
    /** Temporary replica: created for replication and relocation only. */
    TEMPORARY(4);

    private final int value;

    private ReplicaState(int v) {
      value = v;
    }

    public int getValue() {
      return value;
    }

    public static ReplicaState getState(int v) {
      return ReplicaState.values()[v];
    }

    /** Read a replica state from the given input. */
    public static ReplicaState read(DataInput in) throws IOException {
      return values()[in.readByte()];
    }

    /** Write this replica state to the given output. */
    public void write(DataOutput out) throws IOException {
      out.writeByte(ordinal());
    }
  }
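
  // Illustrative note (not part of the original class): each state's numeric value
  // matches its ordinal, so write()/read() round-trip through a single byte, e.g.
  //   ReplicaState.RBW.getValue() == 1
  //   RBW.write(out) writes the byte 1, and ReplicaState.read(in) maps it back to RBW.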

  /**
   * States a block can go through while it is under construction.
   */
  static public enum BlockUCState {
    /**
     * Block construction completed.<br>
     * The block has at least one {@link ReplicaState#FINALIZED} replica,
     * and is not going to be modified.
     */
    COMPLETE,
    /**
     * The block is under construction.<br>
     * It has been recently allocated for write or append.
     */
    UNDER_CONSTRUCTION,
    /**
     * The block is under recovery.<br>
     * When a file lease expires, its last block may not be {@link #COMPLETE}
     * and needs to go through a recovery procedure,
     * which synchronizes the contents of the existing replicas.
     */
    UNDER_RECOVERY,
    /**
     * The block is committed.<br>
     * The client reported that all bytes have been written to data-nodes
     * with the given generation stamp and block length, but no
     * {@link ReplicaState#FINALIZED}
     * replicas have yet been reported by the data-nodes themselves.
     */
    COMMITTED;
  }
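
  // Illustrative note (an assumption about the usual lifecycle, not stated in this
  // file): a block typically moves UNDER_CONSTRUCTION -> COMMITTED -> COMPLETE as the
  // client finishes writing and data-nodes report finalized replicas; UNDER_RECOVERY
  // is entered when a lease expires before the last block reaches COMPLETE.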

  public static final String NAMENODE_LEASE_HOLDER = "HDFS_NameNode";
  public static final long NAMENODE_LEASE_RECHECK_INTERVAL = 2000;

  public static final String CRYPTO_XATTR_ENCRYPTION_ZONE =
      "raw.hdfs.crypto.encryption.zone";
  public static final String CRYPTO_XATTR_FILE_ENCRYPTION_INFO =
      "raw.hdfs.crypto.file.encryption.info";
  public static final String SECURITY_XATTR_UNREADABLE_BY_SUPERUSER =
      "security.hdfs.unreadable.by.superuser";
}