/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hdfs.server.common;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdfs.server.namenode.MetaRecoveryContext;

import com.google.common.base.Preconditions;

/************************************
 * Some handy internal HDFS constants
 *
 ************************************/

@InterfaceAudience.Private
public final class HdfsServerConstants {
  /* Hidden constructor */
  private HdfsServerConstants() { }

  /**
   * Type of the node
   */
  public static enum NodeType {
    NAME_NODE,
    DATA_NODE,
    JOURNAL_NODE;
  }

  /** Startup options for rolling upgrade. */
  public static enum RollingUpgradeStartupOption {
    ROLLBACK, DOWNGRADE, STARTED;

    public String getOptionString() {
      return StartupOption.ROLLINGUPGRADE.getName() + " "
          + name().toLowerCase();
    }

    public boolean matches(StartupOption option) {
      return option == StartupOption.ROLLINGUPGRADE
          && option.getRollingUpgradeStartupOption() == this;
    }

    private static final RollingUpgradeStartupOption[] VALUES = values();

    static RollingUpgradeStartupOption fromString(String s) {
      for (RollingUpgradeStartupOption opt : VALUES) {
        if (opt.name().equalsIgnoreCase(s)) {
          return opt;
        }
      }
      throw new IllegalArgumentException("Failed to convert \"" + s
          + "\" to " + RollingUpgradeStartupOption.class.getSimpleName());
    }
  }
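
  // Illustrative sketch of how the conversions above compose when parsing a
  // "-rollingUpgrade <phase>" command-line argument:
  //
  //   RollingUpgradeStartupOption opt = RollingUpgradeStartupOption.fromString("rollback");
  //   // opt == RollingUpgradeStartupOption.ROLLBACK (lookup is case-insensitive)
  //   // opt.getOptionString() yields "-rollingUpgrade rollback"
  //
  //   StartupOption parsed = StartupOption.ROLLINGUPGRADE;
  //   parsed.setRollingUpgradeStartupOption("rollback");
  //   // opt.matches(parsed) is now true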

  /** Startup options */
  public static enum StartupOption {
    FORMAT  ("-format"),
    CLUSTERID ("-clusterid"),
    GENCLUSTERID ("-genclusterid"),
    REGULAR ("-regular"),
    BACKUP  ("-backup"),
    CHECKPOINT("-checkpoint"),
    UPGRADE ("-upgrade"),
    ROLLBACK("-rollback"),
    FINALIZE("-finalize"),
    ROLLINGUPGRADE("-rollingUpgrade"),
    IMPORT  ("-importCheckpoint"),
    BOOTSTRAPSTANDBY("-bootstrapStandby"),
    INITIALIZESHAREDEDITS("-initializeSharedEdits"),
    RECOVER  ("-recover"),
    FORCE("-force"),
    NONINTERACTIVE("-nonInteractive"),
    RENAMERESERVED("-renameReserved"),
    METADATAVERSION("-metadataVersion");

    private static final Pattern ENUM_WITH_ROLLING_UPGRADE_OPTION = Pattern.compile(
        "(\\w+)\\((\\w+)\\)");

    private final String name;

    // Used only with format and upgrade options
    private String clusterId = null;

    // Used only by rolling upgrade
    private RollingUpgradeStartupOption rollingUpgradeStartupOption;

    // Used only with format option
    private boolean isForceFormat = false;
    private boolean isInteractiveFormat = true;

    // Used only with recovery option
    private int force = 0;

    private StartupOption(String arg) {this.name = arg;}
    public String getName() {return name;}
    public NamenodeRole toNodeRole() {
      switch(this) {
      case BACKUP:
        return NamenodeRole.BACKUP;
      case CHECKPOINT:
        return NamenodeRole.CHECKPOINT;
      default:
        return NamenodeRole.NAMENODE;
      }
    }

    public void setClusterId(String cid) {
      clusterId = cid;
    }

    public String getClusterId() {
      return clusterId;
    }

    public void setRollingUpgradeStartupOption(String opt) {
      Preconditions.checkState(this == ROLLINGUPGRADE);
      rollingUpgradeStartupOption = RollingUpgradeStartupOption.fromString(opt);
    }

    public RollingUpgradeStartupOption getRollingUpgradeStartupOption() {
      Preconditions.checkState(this == ROLLINGUPGRADE);
      return rollingUpgradeStartupOption;
    }

    public MetaRecoveryContext createRecoveryContext() {
      if (!name.equals(RECOVER.name)) {
        return null;
      }
      return new MetaRecoveryContext(force);
    }

    public void setForce(int force) {
      this.force = force;
    }

    public int getForce() {
      return this.force;
    }

    public boolean getForceFormat() {
      return isForceFormat;
    }

    public void setForceFormat(boolean force) {
      isForceFormat = force;
    }

    public boolean getInteractiveFormat() {
      return isInteractiveFormat;
    }

    public void setInteractiveFormat(boolean interactive) {
      isInteractiveFormat = interactive;
    }

    @Override
    public String toString() {
      if (this == ROLLINGUPGRADE) {
        return new StringBuilder(super.toString())
            .append("(").append(getRollingUpgradeStartupOption()).append(")")
            .toString();
      }
      return super.toString();
    }

    public static StartupOption getEnum(String value) {
      Matcher matcher = ENUM_WITH_ROLLING_UPGRADE_OPTION.matcher(value);
      if (matcher.matches()) {
        StartupOption option = StartupOption.valueOf(matcher.group(1));
        option.setRollingUpgradeStartupOption(matcher.group(2));
        return option;
      } else {
        return StartupOption.valueOf(value);
      }
    }
  }
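
  // Illustrative sketch: getEnum() round-trips the toString() form used for
  // ROLLINGUPGRADE and falls back to a plain valueOf() lookup otherwise.
  //
  //   StartupOption plain = StartupOption.getEnum("FORMAT");
  //   // plain == StartupOption.FORMAT
  //
  //   StartupOption ru = StartupOption.getEnum("ROLLINGUPGRADE(ROLLBACK)");
  //   // ru == StartupOption.ROLLINGUPGRADE
  //   // ru.getRollingUpgradeStartupOption() == RollingUpgradeStartupOption.ROLLBACK
  //   // ru.toString() yields "ROLLINGUPGRADE(ROLLBACK)" again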

  // Timeouts (in milliseconds) for communicating with DataNodes during streaming writes/reads
  public static final int READ_TIMEOUT = 60 * 1000;
  public static final int READ_TIMEOUT_EXTENSION = 5 * 1000;
  public static final int WRITE_TIMEOUT = 8 * 60 * 1000;
  public static final int WRITE_TIMEOUT_EXTENSION = 5 * 1000; // for write pipeline
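
  // Sketch (assumption, not defined in this file): a caller could scale the base
  // timeouts by pipeline length using the extension constants, e.g. for a write
  // pipeline spanning numNodes DataNodes:
  //
  //   int writeTimeout = HdfsServerConstants.WRITE_TIMEOUT
  //       + HdfsServerConstants.WRITE_TIMEOUT_EXTENSION * numNodes;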

  /**
   * Defines the NameNode role.
   */
  public static enum NamenodeRole {
    NAMENODE  ("NameNode"),
    BACKUP    ("Backup Node"),
    CHECKPOINT("Checkpoint Node");

    private final String description;
    private NamenodeRole(String arg) {this.description = arg;}

    @Override
    public String toString() {
      return description;
    }
  }

  /**
   * Block replica states. A replica can go through these states while it is
   * being constructed.
   */
  public static enum ReplicaState {
    /** Replica is finalized. The state when a replica is not modified. */
    FINALIZED(0),
    /** Replica is being written to. */
    RBW(1),
    /** Replica is waiting to be recovered. */
    RWR(2),
    /** Replica is under recovery. */
    RUR(3),
    /** Temporary replica: created for replication and relocation only. */
    TEMPORARY(4);

    private final int value;

    private ReplicaState(int v) {
      value = v;
    }

    public int getValue() {
      return value;
    }

    public static ReplicaState getState(int v) {
      return ReplicaState.values()[v];
    }

    /** Read a ReplicaState from {@code in}. */
    public static ReplicaState read(DataInput in) throws IOException {
      return values()[in.readByte()];
    }

    /** Write this ReplicaState to {@code out}. */
    public void write(DataOutput out) throws IOException {
      out.writeByte(ordinal());
    }
  }
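
  // Illustrative round trip for ReplicaState.write()/read() (sketch; streams are
  // the standard java.io DataOutputStream/DataInputStream wrappers):
  //
  //   ByteArrayOutputStream bytes = new ByteArrayOutputStream();
  //   ReplicaState.RBW.write(new DataOutputStream(bytes));
  //   ReplicaState state = ReplicaState.read(
  //       new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())));
  //   // state == ReplicaState.RBW; a single byte (the ordinal) is used on the wire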

  /**
   * States that a block can go through while it is under construction.
   */
  public static enum BlockUCState {
    /**
     * Block construction completed.<br>
     * The block has at least one {@link ReplicaState#FINALIZED} replica,
     * and is not going to be modified.
     */
    COMPLETE,
    /**
     * The block is under construction.<br>
     * It has been recently allocated for write or append.
     */
    UNDER_CONSTRUCTION,
    /**
     * The block is under recovery.<br>
     * When a file lease expires, its last block may not be {@link #COMPLETE},
     * and it needs to go through a recovery procedure
     * which synchronizes the contents of the existing replicas.
     */
    UNDER_RECOVERY,
    /**
     * The block is committed.<br>
     * The client has reported that all bytes have been written to data-nodes
     * with the given generation stamp and block length, but no
     * {@link ReplicaState#FINALIZED}
     * replica has yet been reported by the data-nodes themselves.
     */
    COMMITTED;
  }
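
  // Typical lifecycle implied by the states above (sketch): a newly allocated block
  // starts UNDER_CONSTRUCTION, moves to COMMITTED once the client reports the final
  // length and generation stamp, and becomes COMPLETE after a FINALIZED replica is
  // reported by a DataNode; UNDER_RECOVERY is entered when a lease expires before
  // the last block reaches COMPLETE.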

  public static final String NAMENODE_LEASE_HOLDER = "HDFS_NameNode";
  public static final long NAMENODE_LEASE_RECHECK_INTERVAL = 2000;
}