/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hdfs.server.common;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdfs.server.namenode.MetaRecoveryContext;

/************************************
 * Some handy internal HDFS constants
 *
 ************************************/
@InterfaceAudience.Private
public final class HdfsServerConstants {
  /* Hidden constructor: constants-only holder, never instantiated. */
  private HdfsServerConstants() { }

  /**
   * Type of the node.
   */
  public enum NodeType {
    NAME_NODE,
    DATA_NODE,
    JOURNAL_NODE;
  }

  /**
   * Startup options accepted on the NameNode command line.
   * <p>
   * NOTE: the constants carry mutable per-option state ({@code clusterId},
   * format/recovery flags) that is filled in during argument parsing.
   * This mirrors the historical design; the enum constants are therefore
   * NOT thread-safe value objects.
   */
  public enum StartupOption {
    FORMAT("-format"),
    CLUSTERID("-clusterid"),
    GENCLUSTERID("-genclusterid"),
    REGULAR("-regular"),
    BACKUP("-backup"),
    CHECKPOINT("-checkpoint"),
    UPGRADE("-upgrade"),
    ROLLBACK("-rollback"),
    FINALIZE("-finalize"),
    IMPORT("-importCheckpoint"),
    BOOTSTRAPSTANDBY("-bootstrapStandby"),
    INITIALIZESHAREDEDITS("-initializeSharedEdits"),
    RECOVER("-recover"),
    FORCE("-force"),
    NONINTERACTIVE("-nonInteractive");

    /** The command-line flag for this option, e.g. {@code "-format"}. */
    private final String name;

    // Used only with format and upgrade options
    private String clusterId = null;

    // Used only with format option
    private boolean isForceFormat = false;
    private boolean isInteractiveFormat = true;

    // Used only with recovery option
    private int force = 0;

    StartupOption(String arg) {
      this.name = arg;
    }

    /** @return the command-line flag for this option. */
    public String getName() {
      return name;
    }

    /**
     * Maps this startup option to the NameNode role it implies.
     * Every option other than BACKUP/CHECKPOINT runs a regular NameNode.
     */
    public NamenodeRole toNodeRole() {
      switch (this) {
      case BACKUP:
        return NamenodeRole.BACKUP;
      case CHECKPOINT:
        return NamenodeRole.CHECKPOINT;
      default:
        return NamenodeRole.NAMENODE;
      }
    }

    public void setClusterId(String cid) {
      clusterId = cid;
    }

    public String getClusterId() {
      return clusterId;
    }

    /**
     * @return a recovery context carrying the force level when this option
     *         is {@link #RECOVER}; {@code null} for every other option.
     */
    public MetaRecoveryContext createRecoveryContext() {
      // Direct enum identity check; equivalent to the old string
      // comparison of flag names, which are unique per constant.
      if (this != RECOVER) {
        return null;
      }
      return new MetaRecoveryContext(force);
    }

    public void setForce(int force) {
      this.force = force;
    }

    public int getForce() {
      return this.force;
    }

    public boolean getForceFormat() {
      return isForceFormat;
    }

    public void setForceFormat(boolean force) {
      isForceFormat = force;
    }

    public boolean getInteractiveFormat() {
      return isInteractiveFormat;
    }

    public void setInteractiveFormat(boolean interactive) {
      isInteractiveFormat = interactive;
    }
  }

  // Timeouts (ms) for communicating with DataNode for streaming
  // writes/reads. Intentionally non-final: other code may override them.
  public static int READ_TIMEOUT = 60 * 1000;
  public static int READ_TIMEOUT_EXTENSION = 5 * 1000;
  public static int WRITE_TIMEOUT = 8 * 60 * 1000;
  public static int WRITE_TIMEOUT_EXTENSION = 5 * 1000; //for write pipeline

  /**
   * Defines the NameNode role.
   */
  public enum NamenodeRole {
    NAMENODE("NameNode"),
    BACKUP("Backup Node"),
    CHECKPOINT("Checkpoint Node");

    /** Human-readable role name; set once at construction. */
    private final String description;

    NamenodeRole(String arg) {
      this.description = arg;
    }

    @Override
    public String toString() {
      return description;
    }
  }

  /**
   * Block replica states, which it can go through while being constructed.
   */
  public enum ReplicaState {
    /** Replica is finalized. The state when replica is not modified. */
    FINALIZED(0),
    /** Replica is being written to. */
    RBW(1),
    /** Replica is waiting to be recovered. */
    RWR(2),
    /** Replica is under recovery. */
    RUR(3),
    /** Temporary replica: created for replication and relocation only. */
    TEMPORARY(4);

    // values() allocates a fresh array on every call; cache one copy for
    // the getState()/read() lookup paths.
    private static final ReplicaState[] CACHED_VALUES = ReplicaState.values();

    /** Numeric wire/disk code; by construction equal to ordinal(). */
    private final int value;

    ReplicaState(int v) {
      value = v;
    }

    public int getValue() {
      return value;
    }

    /**
     * @param v numeric state code as produced by {@link #getValue()}
     * @return the corresponding state
     * @throws ArrayIndexOutOfBoundsException if {@code v} is out of range
     */
    public static ReplicaState getState(int v) {
      return CACHED_VALUES[v];
    }

    /** Read from in */
    public static ReplicaState read(DataInput in) throws IOException {
      return CACHED_VALUES[in.readByte()];
    }

    /** Write to out */
    public void write(DataOutput out) throws IOException {
      // Serialize the explicit code rather than ordinal(); the two are
      // identical here, but this keeps the on-wire form tied to 'value'
      // (the same field getValue() exposes) instead of declaration order.
      out.writeByte(value);
    }
  }

  /**
   * States, which a block can go through while it is under construction.
   */
  public enum BlockUCState {
    /**
     * Block construction completed.<br>
     * The block has at least one {@link ReplicaState#FINALIZED} replica,
     * and is not going to be modified.
     */
    COMPLETE,
    /**
     * The block is under construction.<br>
     * It has been recently allocated for write or append.
     */
    UNDER_CONSTRUCTION,
    /**
     * The block is under recovery.<br>
     * When a file lease expires its last block may not be {@link #COMPLETE}
     * and needs to go through a recovery procedure,
     * which synchronizes the existing replicas contents.
     */
    UNDER_RECOVERY,
    /**
     * The block is committed.<br>
     * The client reported that all bytes are written to data-nodes
     * with the given generation stamp and block length, but no
     * {@link ReplicaState#FINALIZED}
     * replicas has yet been reported by data-nodes themselves.
     */
    COMMITTED;
  }

  /** Lease holder name used by the NameNode itself during recovery. */
  public static final String NAMENODE_LEASE_HOLDER = "HDFS_NameNode";
  /** Interval (ms) between NameNode lease re-checks. */
  public static final long NAMENODE_LEASE_RECHECK_INTERVAL = 2000;
}