001/**
002 * Licensed to the Apache Software Foundation (ASF) under one
003 * or more contributor license agreements.  See the NOTICE file
004 * distributed with this work for additional information
005 * regarding copyright ownership.  The ASF licenses this file
006 * to you under the Apache License, Version 2.0 (the
007 * "License"); you may not use this file except in compliance
008 * with the License.  You may obtain a copy of the License at
009 *
010 *     http://www.apache.org/licenses/LICENSE-2.0
011 *
012 * Unless required by applicable law or agreed to in writing, software
013 * distributed under the License is distributed on an "AS IS" BASIS,
014 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
015 * See the License for the specific language governing permissions and
016 * limitations under the License.
017 */
018package org.apache.hadoop.hdfs.protocol;
019
import java.util.HashMap;
import java.util.Locale;
import java.util.Map;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.DataNodeLayoutVersion;
import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.NameNodeLayoutVersion;
033
034/************************************
035 * Some handy constants
036 * 
037 ************************************/
038@InterfaceAudience.Private
039public class HdfsConstants {
040  /* Hidden constructor */
041  protected HdfsConstants() {
042  }
043  
044  /**
045   * HDFS Protocol Names:  
046   */
047  public static final String CLIENT_NAMENODE_PROTOCOL_NAME = 
048      "org.apache.hadoop.hdfs.protocol.ClientProtocol";
049  public static final String CLIENT_DATANODE_PROTOCOL_NAME = 
050      "org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol";
051  
052  
053  public static final int MIN_BLOCKS_FOR_WRITE = 5;
054
055  // Long that indicates "leave current quota unchanged"
056  public static final long QUOTA_DONT_SET = Long.MAX_VALUE;
057  public static final long QUOTA_RESET = -1L;
058
059  //
060  // Timeouts, constants
061  //
062  public static final long LEASE_SOFTLIMIT_PERIOD = 60 * 1000;
063  public static final long LEASE_HARDLIMIT_PERIOD = 60 * LEASE_SOFTLIMIT_PERIOD;
064  public static final long LEASE_RECOVER_PERIOD = 10 * 1000; // in ms
065
066  // We need to limit the length and depth of a path in the filesystem.
067  // HADOOP-438
068  // Currently we set the maximum length to 8k characters and the maximum depth
069  // to 1k.
070  public static final int MAX_PATH_LENGTH = 8000;
071  public static final int MAX_PATH_DEPTH = 1000;
072
073  // TODO should be conf injected?
074  public static final int DEFAULT_DATA_SOCKET_SIZE = 128 * 1024;
075  public static final int IO_FILE_BUFFER_SIZE = new HdfsConfiguration().getInt(
076      DFSConfigKeys.IO_FILE_BUFFER_SIZE_KEY,
077      DFSConfigKeys.IO_FILE_BUFFER_SIZE_DEFAULT);
078  // Used for writing header etc.
079  public static final int SMALL_BUFFER_SIZE = Math.min(IO_FILE_BUFFER_SIZE / 2,
080      512);
081
082  public static final int BYTES_IN_INTEGER = Integer.SIZE / Byte.SIZE;
083
084  // SafeMode actions
085  public static enum SafeModeAction {
086    SAFEMODE_LEAVE, SAFEMODE_ENTER, SAFEMODE_GET;
087  }
088
089  public static enum RollingUpgradeAction {
090    QUERY, PREPARE, FINALIZE;
091    
092    private static final Map<String, RollingUpgradeAction> MAP
093        = new HashMap<String, RollingUpgradeAction>();
094    static {
095      MAP.put("", QUERY);
096      for(RollingUpgradeAction a : values()) {
097        MAP.put(a.name(), a);
098      }
099    }
100
101    /** Covert the given String to a RollingUpgradeAction. */
102    public static RollingUpgradeAction fromString(String s) {
103      return MAP.get(s.toUpperCase());
104    }
105  }
106
107  // type of the datanode report
108  public static enum DatanodeReportType {
109    ALL, LIVE, DEAD, DECOMMISSIONING
110  }
111
112  // An invalid transaction ID that will never be seen in a real namesystem.
113  public static final long INVALID_TXID = -12345;
114
115  // Number of generation stamps reserved for legacy blocks.
116  public static final long RESERVED_GENERATION_STAMPS_V1 =
117      1024L * 1024 * 1024 * 1024;
118
119  /**
120   * URI Scheme for hdfs://namenode/ URIs.
121   */
122  public static final String HDFS_URI_SCHEME = "hdfs";
123
124  /**
125   * A prefix put before the namenode URI inside the "service" field
126   * of a delgation token, indicating that the URI is a logical (HA)
127   * URI.
128   */
129  public static final String HA_DT_SERVICE_PREFIX = "ha-hdfs:";
130
131  /**
132   * Path components that are reserved in HDFS.
133   * <p>
134   * .reserved is only reserved under root ("/").
135   */
136  public static final String[] RESERVED_PATH_COMPONENTS = new String[] {
137    HdfsConstants.DOT_SNAPSHOT_DIR,
138    FSDirectory.DOT_RESERVED_STRING
139  };
140
141  /**
142   * Current layout version for NameNode.
143   * Please see {@link NameNodeLayoutVersion.Feature} on adding new layout version.
144   */
145  public static final int NAMENODE_LAYOUT_VERSION
146      = NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION;
147
148  /**
149   * Current layout version for DataNode.
150   * Please see {@link DataNodeLayoutVersion.Feature} on adding new layout version.
151   */
152  public static final int DATANODE_LAYOUT_VERSION
153      = DataNodeLayoutVersion.CURRENT_LAYOUT_VERSION;
154
155  /**
156   * A special path component contained in the path for a snapshot file/dir
157   */
158  public static final String DOT_SNAPSHOT_DIR = ".snapshot";
159
160  public static final byte[] DOT_SNAPSHOT_DIR_BYTES
161      = DFSUtil.string2Bytes(DOT_SNAPSHOT_DIR);
162  
163  public static final String SEPARATOR_DOT_SNAPSHOT_DIR
164      = Path.SEPARATOR + DOT_SNAPSHOT_DIR; 
165}