/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hdfs.protocol;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;

/************************************
 * Common constants used by HDFS clients and servers.
 ************************************/
@InterfaceAudience.Private
public class HdfsConstants {
  /* Hidden constructor */
  protected HdfsConstants() {
  }

  // Minimum number of blocks' worth of free space a datanode must have
  // available in order to be selected as a target for a new block.
  public static int MIN_BLOCKS_FOR_WRITE = 5;

  // Long that indicates "leave current quota unchanged"
  public static final long QUOTA_DONT_SET = Long.MAX_VALUE;
  // Long that indicates "clear the current quota"
  public static final long QUOTA_RESET = -1L;
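
  // A minimal usage sketch (the "dfs" and "path" names below are illustrative,
  // not part of this class): DistributedFileSystem#setQuota accepts these
  // sentinels, e.g.
  //
  //   dfs.setQuota(path, QUOTA_DONT_SET, QUOTA_RESET);
  //
  // would leave the namespace quota unchanged while clearing the diskspace quota.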

  //
  // Timeout constants
  //
  public static final long LEASE_SOFTLIMIT_PERIOD = 60 * 1000;       // 1 minute, in ms
  public static final long LEASE_HARDLIMIT_PERIOD = 60 * LEASE_SOFTLIMIT_PERIOD; // 60 minutes, in ms
  public static final long LEASE_RECOVER_PERIOD = 10 * 1000;         // 10 seconds, in ms
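
  // Hypothetical illustration of how the lease limits relate to each other
  // (variable names are made up, not actual HDFS code):
  //
  //   long elapsed = now - lastLeaseRenewal;
  //   boolean softExpired = elapsed > LEASE_SOFTLIMIT_PERIOD; // another client may claim the file
  //   boolean hardExpired = elapsed > LEASE_HARDLIMIT_PERIOD; // namenode may force lease recovery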

  // We need to limit the length and depth of a path in the filesystem
  // (HADOOP-438). Currently the maximum length is 8000 characters and the
  // maximum depth is 1000 path components.
  public static int MAX_PATH_LENGTH = 8000;
  public static int MAX_PATH_DEPTH = 1000;
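
  // Hypothetical validation sketch using these limits (an illustration only,
  // not the actual namenode check):
  //
  //   int depth = path.split(Path.SEPARATOR).length;
  //   if (path.length() > MAX_PATH_LENGTH || depth > MAX_PATH_DEPTH) {
  //     throw new IOException("Path exceeds maximum length or depth: " + path);
  //   }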

  // TODO: should this be injected from the configuration instead?
  public static final int DEFAULT_DATA_SOCKET_SIZE = 128 * 1024;
  public static final int IO_FILE_BUFFER_SIZE = new HdfsConfiguration().getInt(
      DFSConfigKeys.IO_FILE_BUFFER_SIZE_KEY,
      DFSConfigKeys.IO_FILE_BUFFER_SIZE_DEFAULT);
  // Used for writing headers and other small records.
  public static final int SMALL_BUFFER_SIZE = Math.min(IO_FILE_BUFFER_SIZE / 2,
      512);
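
  // Note that IO_FILE_BUFFER_SIZE is read once, at class-load time, from the
  // default configuration resources. A hypothetical core-site.xml override:
  //
  //   <property>
  //     <name>io.file.buffer.size</name>
  //     <value>65536</value>
  //   </property>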

  public static final int BYTES_IN_INTEGER = Integer.SIZE / Byte.SIZE;

  // SafeMode actions
  public static enum SafeModeAction {
    SAFEMODE_LEAVE, SAFEMODE_ENTER, SAFEMODE_GET;
  }
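
  // Minimal usage sketch (assuming an existing DistributedFileSystem "dfs"):
  //
  //   boolean inSafeMode = dfs.setSafeMode(SafeModeAction.SAFEMODE_GET);
  //
  // which mirrors the "dfsadmin -safemode get" shell command.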

  // type of the datanode report
  public static enum DatanodeReportType {
    ALL, LIVE, DEAD
  }
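
  // Usage sketch (assuming a ClientProtocol proxy named "namenode"):
  //
  //   DatanodeInfo[] live = namenode.getDatanodeReport(DatanodeReportType.LIVE);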

  // An invalid transaction ID that will never be seen in a real namesystem.
  public static final long INVALID_TXID = -12345;

078      /**
079       * Distributed upgrade actions:
080       * 
081       * 1. Get upgrade status. 2. Get detailed upgrade status. 3. Proceed with the
082       * upgrade if it is stuck, no matter what the status is.
083       */
084      public static enum UpgradeAction {
085        GET_STATUS, DETAILED_STATUS, FORCE_PROCEED;
086      }
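
  // Usage sketch (assuming a ClientProtocol proxy named "namenode"):
  //
  //   UpgradeStatusReport status =
  //       namenode.distributedUpgradeProgress(UpgradeAction.GET_STATUS);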

  /**
   * URI Scheme for hdfs://namenode/ URIs.
   */
  public static final String HDFS_URI_SCHEME = "hdfs";
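
  // Hypothetical sketch of using the scheme to build a filesystem URI
  // (host and port below are illustrative):
  //
  //   URI uri = URI.create(HDFS_URI_SCHEME + "://namenode.example.com:8020/");
  //   FileSystem fs = FileSystem.get(uri, new Configuration());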

  /**
   * Please see {@link LayoutVersion} before adding a new layout version.
   */
  public static final int LAYOUT_VERSION = LayoutVersion
      .getCurrentLayoutVersion();
}