/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hdfs.protocol;

import java.util.HashMap;
import java.util.Map;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.DataNodeLayoutVersion;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.NameNodeLayoutVersion;
import org.apache.hadoop.util.StringUtils;

/************************************
 * Some handy constants
 *
 ************************************/
@InterfaceAudience.Private
public class HdfsConstants {
  /* Hidden constructor */
  protected HdfsConstants() {
  }

  /**
   * HDFS Protocol Names:
   */
  public static final String CLIENT_NAMENODE_PROTOCOL_NAME =
      "org.apache.hadoop.hdfs.protocol.ClientProtocol";
  public static final String CLIENT_DATANODE_PROTOCOL_NAME =
      "org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol";


  public static final int MIN_BLOCKS_FOR_WRITE = 1;

  // Long that indicates "leave current quota unchanged"
  public static final long QUOTA_DONT_SET = Long.MAX_VALUE;
  public static final long QUOTA_RESET = -1L;

  //
  // Timeouts, constants
  //
  // Lease limits are in milliseconds: the soft limit is 1 minute and the
  // hard limit is 1 hour.
  public static final long LEASE_SOFTLIMIT_PERIOD = 60 * 1000;
  public static final long LEASE_HARDLIMIT_PERIOD = 60 * LEASE_SOFTLIMIT_PERIOD;
  public static final long LEASE_RECOVER_PERIOD = 10 * 1000; // in ms

  // We need to limit the length and depth of a path in the filesystem.
  // HADOOP-438
  // Currently we set the maximum length to 8k characters and the maximum depth
  // to 1k.
  public static final int MAX_PATH_LENGTH = 8000;
  public static final int MAX_PATH_DEPTH = 1000;

  // TODO should be conf injected?
  public static final int DEFAULT_DATA_SOCKET_SIZE = 128 * 1024;
  public static final int IO_FILE_BUFFER_SIZE = new HdfsConfiguration().getInt(
      DFSConfigKeys.IO_FILE_BUFFER_SIZE_KEY,
      DFSConfigKeys.IO_FILE_BUFFER_SIZE_DEFAULT);
  // Used for writing header etc.
  public static final int SMALL_BUFFER_SIZE = Math.min(IO_FILE_BUFFER_SIZE / 2,
      512);

  public static final int BYTES_IN_INTEGER = Integer.SIZE / Byte.SIZE;

  // SafeMode actions
  public static enum SafeModeAction {
    SAFEMODE_LEAVE, SAFEMODE_ENTER, SAFEMODE_GET;
  }

  public static enum RollingUpgradeAction {
    QUERY, PREPARE, FINALIZE;

    private static final Map<String, RollingUpgradeAction> MAP
        = new HashMap<String, RollingUpgradeAction>();
    static {
      MAP.put("", QUERY);
      for(RollingUpgradeAction a : values()) {
        MAP.put(a.name(), a);
      }
    }

    /**
     * Convert the given String to a RollingUpgradeAction. The lookup is
     * case-insensitive, the empty string maps to {@link #QUERY}, and
     * unrecognized values return null.
     */
    public static RollingUpgradeAction fromString(String s) {
      return MAP.get(StringUtils.toUpperCase(s));
    }
  }

  // Type of the datanode report
  public static enum DatanodeReportType {
    ALL, LIVE, DEAD, DECOMMISSIONING
  }

  // An invalid transaction ID that will never be seen in a real namesystem.
  public static final long INVALID_TXID = -12345;

  // Number of generation stamps reserved for legacy blocks.
  public static final long RESERVED_GENERATION_STAMPS_V1 =
      1024L * 1024 * 1024 * 1024;

  /**
   * URI Scheme for hdfs://namenode/ URIs.
   */
  public static final String HDFS_URI_SCHEME = "hdfs";

  /**
   * A prefix put before the namenode URI inside the "service" field
   * of a delegation token, indicating that the URI is a logical (HA)
   * URI.
   */
  public static final String HA_DT_SERVICE_PREFIX = "ha-";

  /**
   * Path components that are reserved in HDFS.
   * <p>
   * .reserved is only reserved under root ("/").
   */
  public static final String[] RESERVED_PATH_COMPONENTS = new String[] {
    HdfsConstants.DOT_SNAPSHOT_DIR,
    FSDirectory.DOT_RESERVED_STRING
  };

  /**
   * Current layout version for NameNode.
   * Please see {@link NameNodeLayoutVersion.Feature} when adding a new layout version.
   */
  public static final int NAMENODE_LAYOUT_VERSION
      = NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION;

  /**
   * Current layout version for DataNode.
   * Please see {@link DataNodeLayoutVersion.Feature} when adding a new layout version.
   */
  public static final int DATANODE_LAYOUT_VERSION
      = DataNodeLayoutVersion.CURRENT_LAYOUT_VERSION;

  /**
   * A special path component contained in the path for a snapshot file/dir.
   */
  public static final String DOT_SNAPSHOT_DIR = ".snapshot";

  public static final byte[] DOT_SNAPSHOT_DIR_BYTES
      = DFSUtil.string2Bytes(DOT_SNAPSHOT_DIR);

  public static final String SEPARATOR_DOT_SNAPSHOT_DIR
      = Path.SEPARATOR + DOT_SNAPSHOT_DIR;

  public static final String SEPARATOR_DOT_SNAPSHOT_DIR_SEPARATOR
      = Path.SEPARATOR + DOT_SNAPSHOT_DIR + Path.SEPARATOR;

  // Names of the built-in block storage policies.
  public static final String MEMORY_STORAGE_POLICY_NAME = "LAZY_PERSIST";
  public static final String ALLSSD_STORAGE_POLICY_NAME = "ALL_SSD";
  public static final String ONESSD_STORAGE_POLICY_NAME = "ONE_SSD";
  public static final String HOT_STORAGE_POLICY_NAME = "HOT";
  public static final String WARM_STORAGE_POLICY_NAME = "WARM";
  public static final String COLD_STORAGE_POLICY_NAME = "COLD";

  // IDs of the built-in block storage policies, corresponding to the names above.
  public static final byte MEMORY_STORAGE_POLICY_ID = 15;
  public static final byte ALLSSD_STORAGE_POLICY_ID = 12;
  public static final byte ONESSD_STORAGE_POLICY_ID = 10;
  public static final byte HOT_STORAGE_POLICY_ID = 7;
  public static final byte WARM_STORAGE_POLICY_ID = 5;
  public static final byte COLD_STORAGE_POLICY_ID = 2;
}
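
// A minimal usage sketch, not part of the upstream file: the package-private
// class below is a hypothetical illustration only. It shows how the snapshot
// path constants compose into the usual "/<dir>/.snapshot/<snapshot>/<file>"
// form, and how RollingUpgradeAction.fromString() resolves CLI-style input
// (case-insensitive; empty string maps to QUERY; unknown values return null).
class HdfsConstantsUsageSketch {
  // e.g. ("/data", "s1", "part-0") -> "/data/.snapshot/s1/part-0"
  static String pathInSnapshot(String snapshottableDir, String snapshotName,
      String relativePath) {
    return snapshottableDir + HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR
        + Path.SEPARATOR + snapshotName + Path.SEPARATOR + relativePath;
  }

  // "prepare" -> PREPARE, "" -> QUERY, "bogus" -> null
  static HdfsConstants.RollingUpgradeAction parseAction(String arg) {
    return HdfsConstants.RollingUpgradeAction.fromString(arg);
  }
}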