001    /**
002     * Licensed to the Apache Software Foundation (ASF) under one
003     * or more contributor license agreements.  See the NOTICE file
004     * distributed with this work for additional information
005     * regarding copyright ownership.  The ASF licenses this file
006     * to you under the Apache License, Version 2.0 (the
007     * "License"); you may not use this file except in compliance
008     * with the License.  You may obtain a copy of the License at
009     *
010     *     http://www.apache.org/licenses/LICENSE-2.0
011     *
012     * Unless required by applicable law or agreed to in writing, software
013     * distributed under the License is distributed on an "AS IS" BASIS,
014     * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
015     * See the License for the specific language governing permissions and
016     * limitations under the License.
017     */
018    package org.apache.hadoop.hdfs;
019    
020    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT;
021    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SIZE_KEY;
022    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_DEFAULT;
023    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY;
024    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_BLOCK_WRITE_LOCATEFOLLOWINGBLOCK_RETRIES_DEFAULT;
025    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_BLOCK_WRITE_LOCATEFOLLOWINGBLOCK_RETRIES_KEY;
026    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_BLOCK_WRITE_RETRIES_DEFAULT;
027    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_BLOCK_WRITE_RETRIES_KEY;
028    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_CACHED_CONN_RETRY_DEFAULT;
029    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_CACHED_CONN_RETRY_KEY;
030    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_FAILOVER_MAX_ATTEMPTS_DEFAULT;
031    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_FAILOVER_MAX_ATTEMPTS_KEY;
032    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_FAILOVER_SLEEPTIME_BASE_DEFAULT;
033    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_FAILOVER_SLEEPTIME_BASE_KEY;
034    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_FAILOVER_SLEEPTIME_MAX_DEFAULT;
035    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_FAILOVER_SLEEPTIME_MAX_KEY;
036    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_DEFAULT;
037    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_KEY;
038    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_READ_PREFETCH_SIZE_KEY;
039    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE;
040    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_CACHE_CAPACITY_DEFAULT;
041    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_CACHE_CAPACITY_KEY;
042    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC_DEFAULT;
043    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC_KEY;
044    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY;
045    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_USE_DN_HOSTNAME;
046    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_USE_DN_HOSTNAME_DEFAULT;
047    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_WRITE_EXCLUDE_NODES_CACHE_EXPIRY_INTERVAL;
048    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_WRITE_EXCLUDE_NODES_CACHE_EXPIRY_INTERVAL_DEFAULT;
049    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT;
050    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY;
051    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY;
052    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_DEFAULT;
053    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_KEY;
054    
055    import java.io.BufferedOutputStream;
056    import java.io.DataInputStream;
057    import java.io.DataOutputStream;
058    import java.io.FileNotFoundException;
059    import java.io.IOException;
060    import java.io.InputStream;
061    import java.io.OutputStream;
062    import java.net.InetAddress;
063    import java.net.InetSocketAddress;
064    import java.net.NetworkInterface;
065    import java.net.Socket;
066    import java.net.SocketException;
067    import java.net.SocketAddress;
068    import java.net.URI;
069    import java.net.UnknownHostException;
070    import java.util.ArrayList;
071    import java.util.Collections;
072    import java.util.EnumSet;
073    import java.util.HashMap;
074    import java.util.LinkedHashMap;
075    import java.util.List;
076    import java.util.Map;
077    import java.util.Random;
078    
079    import javax.net.SocketFactory;
080    
081    import org.apache.commons.logging.Log;
082    import org.apache.commons.logging.LogFactory;
083    import org.apache.hadoop.classification.InterfaceAudience;
084    import org.apache.hadoop.conf.Configuration;
085    import org.apache.hadoop.fs.BlockLocation;
086    import org.apache.hadoop.fs.BlockStorageLocation;
087    import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
088    import org.apache.hadoop.fs.ContentSummary;
089    import org.apache.hadoop.fs.CreateFlag;
090    import org.apache.hadoop.fs.FileAlreadyExistsException;
091    import org.apache.hadoop.fs.FileSystem;
092    import org.apache.hadoop.fs.FsServerDefaults;
093    import org.apache.hadoop.fs.FsStatus;
094    import org.apache.hadoop.fs.HdfsBlockLocation;
095    import org.apache.hadoop.fs.InvalidPathException;
096    import org.apache.hadoop.fs.MD5MD5CRC32CastagnoliFileChecksum;
097    import org.apache.hadoop.fs.MD5MD5CRC32FileChecksum;
098    import org.apache.hadoop.fs.MD5MD5CRC32GzipFileChecksum;
099    import org.apache.hadoop.fs.Options;
100    import org.apache.hadoop.fs.Options.ChecksumOpt;
101    import org.apache.hadoop.fs.ParentNotDirectoryException;
102    import org.apache.hadoop.fs.UnresolvedLinkException;
103    import org.apache.hadoop.fs.VolumeId;
104    import org.apache.hadoop.fs.permission.FsPermission;
105    import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
106    import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
107    import org.apache.hadoop.hdfs.protocol.ClientProtocol;
108    import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
109    import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
110    import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
111    import org.apache.hadoop.hdfs.protocol.DirectoryListing;
112    import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
113    import org.apache.hadoop.hdfs.protocol.HdfsBlocksMetadata;
114    import org.apache.hadoop.hdfs.protocol.HdfsConstants;
115    import org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException;
116    import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
117    import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
118    import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
119    import org.apache.hadoop.hdfs.protocol.LocatedBlock;
120    import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
121    import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException;
122    import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
123    import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
124    import org.apache.hadoop.hdfs.protocol.UnresolvedPathException;
125    import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferEncryptor;
126    import org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair;
127    import org.apache.hadoop.hdfs.protocol.datatransfer.Op;
128    import org.apache.hadoop.hdfs.protocol.datatransfer.ReplaceDatanodeOnFailure;
129    import org.apache.hadoop.hdfs.protocol.datatransfer.Sender;
130    import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto;
131    import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto;
132    import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
133    import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
134    import org.apache.hadoop.hdfs.protocolPB.PBHelper;
135    import org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException;
136    import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
137    import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
138    import org.apache.hadoop.hdfs.server.namenode.NameNode;
139    import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
140    import org.apache.hadoop.io.DataOutputBuffer;
141    import org.apache.hadoop.io.EnumSetWritable;
142    import org.apache.hadoop.io.IOUtils;
143    import org.apache.hadoop.io.MD5Hash;
144    import org.apache.hadoop.io.Text;
145    import org.apache.hadoop.ipc.Client;
146    import org.apache.hadoop.ipc.RPC;
147    import org.apache.hadoop.ipc.RemoteException;
148    import org.apache.hadoop.net.DNS;
149    import org.apache.hadoop.net.NetUtils;
150    import org.apache.hadoop.security.AccessControlException;
151    import org.apache.hadoop.security.UserGroupInformation;
152    import org.apache.hadoop.security.token.SecretManager.InvalidToken;
153    import org.apache.hadoop.security.token.Token;
154    import org.apache.hadoop.security.token.TokenRenewer;
155    import org.apache.hadoop.util.DataChecksum;
156    import org.apache.hadoop.util.DataChecksum.Type;
157    import org.apache.hadoop.util.Progressable;
158    import org.apache.hadoop.util.Time;
159    
160    import com.google.common.annotations.VisibleForTesting;
161    import com.google.common.base.Joiner;
162    import com.google.common.base.Preconditions;
163    import com.google.common.net.InetAddresses;
164    
165    /********************************************************
166     * DFSClient can connect to a Hadoop Filesystem and 
167     * perform basic file tasks.  It uses the ClientProtocol
168     * to communicate with a NameNode daemon, and connects 
169     * directly to DataNodes to read/write block data.
170     *
171     * Hadoop DFS users should obtain an instance of 
172     * DistributedFileSystem, which uses DFSClient to handle
173     * filesystem tasks.
174     *
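        * A minimal usage sketch through that recommended FileSystem API; the URI,
        * path, and configuration values below are illustrative only, and DFSClient
        * itself is marked {@code @InterfaceAudience.Private}:
        *
        * <pre>{@code
        * Configuration conf = new HdfsConfiguration();
        * FileSystem fs = FileSystem.get(URI.create("hdfs://namenode:8020/"), conf);
        * FSDataInputStream in = fs.open(new Path("/user/alice/data.txt"));
        * try {
        *   // reads are served through DFSClient/DFSInputStream under the covers
        *   IOUtils.copyBytes(in, System.out, conf, false);
        * } finally {
        *   in.close();
        * }
        * }</pre>
        *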
175     ********************************************************/
176    @InterfaceAudience.Private
177    public class DFSClient implements java.io.Closeable {
178      public static final Log LOG = LogFactory.getLog(DFSClient.class);
179      public static final long SERVER_DEFAULTS_VALIDITY_PERIOD = 60 * 60 * 1000L; // 1 hour
180      static final int TCP_WINDOW_SIZE = 128 * 1024; // 128 KB
181    
182      private final Configuration conf;
183      private final Conf dfsClientConf;
184      final ClientProtocol namenode;
185      /* The service used for delegation tokens */
186      private Text dtService;
187    
188      final UserGroupInformation ugi;
189      volatile boolean clientRunning = true;
190      volatile long lastLeaseRenewal;
191      private volatile FsServerDefaults serverDefaults;
192      private volatile long serverDefaultsLastUpdate;
193      final String clientName;
194      SocketFactory socketFactory;
195      final ReplaceDatanodeOnFailure dtpReplaceDatanodeOnFailure;
196      final FileSystem.Statistics stats;
197      private final String authority;
198      final PeerCache peerCache;
199      private Random r = new Random();
200      private SocketAddress[] localInterfaceAddrs;
201      private DataEncryptionKey encryptionKey;
202      private boolean shouldUseLegacyBlockReaderLocal;
203      
204      /**
205       * DFSClient configuration 
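          *
          * A hedged sketch of tuning one client-side key before constructing a
          * {@link DFSClient}; the key comes from {@link DFSConfigKeys}, while the
          * value and {@code nnAddress} (the namenode's socket address) are
          * illustrative:
          *
          * <pre>{@code
          * Configuration conf = new HdfsConfiguration();
          * conf.setInt(DFSConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_KEY, 5);
          * DFSClient client = new DFSClient(NameNode.getUri(nnAddress), conf);
          * }</pre>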
206       */
207      public static class Conf {
208        final int hdfsTimeout;    // timeout value for a DFS operation.
209        final int maxFailoverAttempts;
210        final int failoverSleepBaseMillis;
211        final int failoverSleepMaxMillis;
212        final int maxBlockAcquireFailures;
213        final int confTime;       // write timeout used for datanode sockets
214        final int ioBufferSize;
215        final ChecksumOpt defaultChecksumOpt;
216        final int writePacketSize;
217        final int socketTimeout;
218        final int socketCacheCapacity;
219        final long socketCacheExpiry;
220        final long excludedNodesCacheExpiry;
221        /** Wait time window (in msec) if BlockMissingException is caught */
222        final int timeWindow;
223        final int nCachedConnRetry;
224        final int nBlockWriteRetry;
225        final int nBlockWriteLocateFollowingRetry;
226        final long defaultBlockSize;
227        final long prefetchSize;
228        final short defaultReplication;
229        final String taskId;
230        final FsPermission uMask;
231        final boolean connectToDnViaHostname;
232        final boolean getHdfsBlocksMetadataEnabled;
233        final int getFileBlockStorageLocationsNumThreads;
234        final int getFileBlockStorageLocationsTimeout;
235    
236        final boolean useLegacyBlockReader;
237        final boolean useLegacyBlockReaderLocal;
238        final String domainSocketPath;
239        final boolean skipShortCircuitChecksums;
240        final int shortCircuitBufferSize;
241        final boolean shortCircuitLocalReads;
242        final boolean domainSocketDataTraffic;
243        final int shortCircuitStreamsCacheSize;
244        final long shortCircuitStreamsCacheExpiryMs; 
245    
246        public Conf(Configuration conf) {
247          // The hdfsTimeout is currently the same as the ipc timeout 
248          hdfsTimeout = Client.getTimeout(conf);
249    
250          maxFailoverAttempts = conf.getInt(
251              DFS_CLIENT_FAILOVER_MAX_ATTEMPTS_KEY,
252              DFS_CLIENT_FAILOVER_MAX_ATTEMPTS_DEFAULT);
253          failoverSleepBaseMillis = conf.getInt(
254              DFS_CLIENT_FAILOVER_SLEEPTIME_BASE_KEY,
255              DFS_CLIENT_FAILOVER_SLEEPTIME_BASE_DEFAULT);
256          failoverSleepMaxMillis = conf.getInt(
257              DFS_CLIENT_FAILOVER_SLEEPTIME_MAX_KEY,
258              DFS_CLIENT_FAILOVER_SLEEPTIME_MAX_DEFAULT);
259    
260          maxBlockAcquireFailures = conf.getInt(
261              DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_KEY,
262              DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_DEFAULT);
263          confTime = conf.getInt(DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY,
264              HdfsServerConstants.WRITE_TIMEOUT);
265          ioBufferSize = conf.getInt(
266              CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY,
267              CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT);
268          defaultChecksumOpt = getChecksumOptFromConf(conf);
269          socketTimeout = conf.getInt(DFS_CLIENT_SOCKET_TIMEOUT_KEY,
270              HdfsServerConstants.READ_TIMEOUT);
271          // dfs.write.packet.size is an internal config variable
272          writePacketSize = conf.getInt(DFS_CLIENT_WRITE_PACKET_SIZE_KEY,
273              DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT);
274          defaultBlockSize = conf.getLongBytes(DFS_BLOCK_SIZE_KEY,
275              DFS_BLOCK_SIZE_DEFAULT);
276          defaultReplication = (short) conf.getInt(
277              DFS_REPLICATION_KEY, DFS_REPLICATION_DEFAULT);
278          taskId = conf.get("mapreduce.task.attempt.id", "NONMAPREDUCE");
279          socketCacheCapacity = conf.getInt(DFS_CLIENT_SOCKET_CACHE_CAPACITY_KEY,
280              DFS_CLIENT_SOCKET_CACHE_CAPACITY_DEFAULT);
281          socketCacheExpiry = conf.getLong(DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC_KEY,
282              DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC_DEFAULT);
283          excludedNodesCacheExpiry = conf.getLong(
284              DFS_CLIENT_WRITE_EXCLUDE_NODES_CACHE_EXPIRY_INTERVAL,
285              DFS_CLIENT_WRITE_EXCLUDE_NODES_CACHE_EXPIRY_INTERVAL_DEFAULT);
286          prefetchSize = conf.getLong(DFS_CLIENT_READ_PREFETCH_SIZE_KEY,
287              10 * defaultBlockSize);
288          timeWindow = conf.getInt(DFS_CLIENT_RETRY_WINDOW_BASE, 3000);
289          nCachedConnRetry = conf.getInt(DFS_CLIENT_CACHED_CONN_RETRY_KEY,
290              DFS_CLIENT_CACHED_CONN_RETRY_DEFAULT);
291          nBlockWriteRetry = conf.getInt(DFS_CLIENT_BLOCK_WRITE_RETRIES_KEY,
292              DFS_CLIENT_BLOCK_WRITE_RETRIES_DEFAULT);
293          nBlockWriteLocateFollowingRetry = conf.getInt(
294              DFS_CLIENT_BLOCK_WRITE_LOCATEFOLLOWINGBLOCK_RETRIES_KEY,
295              DFS_CLIENT_BLOCK_WRITE_LOCATEFOLLOWINGBLOCK_RETRIES_DEFAULT);
296          uMask = FsPermission.getUMask(conf);
297          connectToDnViaHostname = conf.getBoolean(DFS_CLIENT_USE_DN_HOSTNAME,
298              DFS_CLIENT_USE_DN_HOSTNAME_DEFAULT);
299          getHdfsBlocksMetadataEnabled = conf.getBoolean(
300              DFSConfigKeys.DFS_HDFS_BLOCKS_METADATA_ENABLED, 
301              DFSConfigKeys.DFS_HDFS_BLOCKS_METADATA_ENABLED_DEFAULT);
302          getFileBlockStorageLocationsNumThreads = conf.getInt(
303              DFSConfigKeys.DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_NUM_THREADS,
304              DFSConfigKeys.DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_NUM_THREADS_DEFAULT);
305          getFileBlockStorageLocationsTimeout = conf.getInt(
306              DFSConfigKeys.DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_TIMEOUT,
307              DFSConfigKeys.DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_TIMEOUT_DEFAULT);
308    
309          useLegacyBlockReader = conf.getBoolean(
310              DFSConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADER,
311              DFSConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADER_DEFAULT);
312          useLegacyBlockReaderLocal = conf.getBoolean(
313              DFSConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL,
314              DFSConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL_DEFAULT);
315          shortCircuitLocalReads = conf.getBoolean(
316              DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_KEY,
317              DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_DEFAULT);
318          domainSocketDataTraffic = conf.getBoolean(
319              DFSConfigKeys.DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC,
320              DFSConfigKeys.DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC_DEFAULT);
321          domainSocketPath = conf.getTrimmed(
322              DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY,
323              DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_DEFAULT);
324    
325          if (BlockReaderLocal.LOG.isDebugEnabled()) {
326            BlockReaderLocal.LOG.debug(
327                DFSConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL
328                + " = " + useLegacyBlockReaderLocal);
329            BlockReaderLocal.LOG.debug(
330                DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_KEY
331                + " = " + shortCircuitLocalReads);
332            BlockReaderLocal.LOG.debug(
333                DFSConfigKeys.DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC
334                + " = " + domainSocketDataTraffic);
335            BlockReaderLocal.LOG.debug(
336                DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY
337                + " = " + domainSocketPath);
338          }
339    
340          skipShortCircuitChecksums = conf.getBoolean(
341              DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_SKIP_CHECKSUM_KEY,
342              DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_SKIP_CHECKSUM_DEFAULT);
343          shortCircuitBufferSize = conf.getInt(
344              DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_BUFFER_SIZE_KEY,
345              DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_BUFFER_SIZE_DEFAULT);
346          shortCircuitStreamsCacheSize = conf.getInt(
347              DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_STREAMS_CACHE_SIZE_KEY,
348              DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_STREAMS_CACHE_SIZE_DEFAULT);
349          shortCircuitStreamsCacheExpiryMs = conf.getLong(
350              DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_STREAMS_CACHE_EXPIRY_MS_KEY,
351              DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_STREAMS_CACHE_EXPIRY_MS_DEFAULT);
352        }
353    
354        private DataChecksum.Type getChecksumType(Configuration conf) {
355          final String checksum = conf.get(
356              DFSConfigKeys.DFS_CHECKSUM_TYPE_KEY,
357              DFSConfigKeys.DFS_CHECKSUM_TYPE_DEFAULT);
358          try {
359            return DataChecksum.Type.valueOf(checksum);
360          } catch(IllegalArgumentException iae) {
361            LOG.warn("Bad checksum type: " + checksum + ". Using default "
362                + DFSConfigKeys.DFS_CHECKSUM_TYPE_DEFAULT);
363            return DataChecksum.Type.valueOf(
364                DFSConfigKeys.DFS_CHECKSUM_TYPE_DEFAULT); 
365          }
366        }
367    
368        // Construct a checksum option from conf
369        private ChecksumOpt getChecksumOptFromConf(Configuration conf) {
370          DataChecksum.Type type = getChecksumType(conf);
371          int bytesPerChecksum = conf.getInt(DFS_BYTES_PER_CHECKSUM_KEY,
372              DFS_BYTES_PER_CHECKSUM_DEFAULT);
373          return new ChecksumOpt(type, bytesPerChecksum);
374        }
375    
376        // create a DataChecksum with the default option.
377        private DataChecksum createChecksum() throws IOException {
378          return createChecksum(null);
379        }
380    
381        private DataChecksum createChecksum(ChecksumOpt userOpt) 
382            throws IOException {
383          // Fill in any missing field with the default.
384          ChecksumOpt myOpt = ChecksumOpt.processChecksumOpt(
385              defaultChecksumOpt, userOpt);
386          DataChecksum dataChecksum = DataChecksum.newDataChecksum(
387              myOpt.getChecksumType(),
388              myOpt.getBytesPerChecksum());
389          if (dataChecksum == null) {
390            throw new IOException("Invalid checksum type specified: "
391                + myOpt.getChecksumType().name());
392          }
393          return dataChecksum;
394        }
395      }
396     
397      public Conf getConf() {
398        return dfsClientConf;
399      }
400      
401      Configuration getConfiguration() {
402        return conf;
403      }
404      
405      /**
406       * A map from file names to {@link DFSOutputStream} objects
407       * that are currently being written by this client.
408       * Note that a file can only be written by a single client.
409       */
410      private final Map<String, DFSOutputStream> filesBeingWritten
411          = new HashMap<String, DFSOutputStream>();
412    
413      private final DomainSocketFactory domainSocketFactory;
414      
415      /**
416       * Same as this(NameNode.getAddress(conf), conf);
417       * @see #DFSClient(InetSocketAddress, Configuration)
418       * @deprecated Deprecated at 0.21
419       */
420      @Deprecated
421      public DFSClient(Configuration conf) throws IOException {
422        this(NameNode.getAddress(conf), conf);
423      }
424      
425      public DFSClient(InetSocketAddress address, Configuration conf) throws IOException {
426        this(NameNode.getUri(address), conf);
427      }
428    
429      /**
430       * Same as this(nameNodeUri, conf, null);
431       * @see #DFSClient(URI, Configuration, FileSystem.Statistics)
432       */
433      public DFSClient(URI nameNodeUri, Configuration conf
434          ) throws IOException {
435        this(nameNodeUri, conf, null);
436      }
437    
438      /**
439       * Same as this(nameNodeUri, null, conf, stats);
440       * @see #DFSClient(URI, ClientProtocol, Configuration, FileSystem.Statistics) 
441       */
442      public DFSClient(URI nameNodeUri, Configuration conf,
443                       FileSystem.Statistics stats)
444        throws IOException {
445        this(nameNodeUri, null, conf, stats);
446      }
447      
448      /** 
449       * Create a new DFSClient connected to the given nameNodeUri or rpcNamenode.
450       * Exactly one of nameNodeUri or rpcNamenode must be null.
451       */
452      @VisibleForTesting
453      public DFSClient(URI nameNodeUri, ClientProtocol rpcNamenode,
454          Configuration conf, FileSystem.Statistics stats)
455        throws IOException {
456        // Copy only the required DFSClient configuration
457        this.dfsClientConf = new Conf(conf);
458        this.shouldUseLegacyBlockReaderLocal = 
459            this.dfsClientConf.useLegacyBlockReaderLocal;
460        if (this.dfsClientConf.useLegacyBlockReaderLocal) {
461          LOG.debug("Using legacy short-circuit local reads.");
462        }
463        this.conf = conf;
464        this.stats = stats;
465        this.socketFactory = NetUtils.getSocketFactory(conf, ClientProtocol.class);
466        this.dtpReplaceDatanodeOnFailure = ReplaceDatanodeOnFailure.get(conf);
467    
468        this.ugi = UserGroupInformation.getCurrentUser();
469        
470        this.authority = nameNodeUri == null? "null": nameNodeUri.getAuthority();
471        this.clientName = "DFSClient_" + dfsClientConf.taskId + "_" + 
472            DFSUtil.getRandom().nextInt()  + "_" + Thread.currentThread().getId();
473        
474        if (rpcNamenode != null) {
475          // This case is used for testing.
476          Preconditions.checkArgument(nameNodeUri == null);
477          this.namenode = rpcNamenode;
478          dtService = null;
479        } else {
480          Preconditions.checkArgument(nameNodeUri != null,
481              "null URI");
482          NameNodeProxies.ProxyAndInfo<ClientProtocol> proxyInfo =
483            NameNodeProxies.createProxy(conf, nameNodeUri, ClientProtocol.class);
484          
485          this.dtService = proxyInfo.getDelegationTokenService();
486          this.namenode = proxyInfo.getProxy();
487        }
488    
489        // read directly from the block file if configured.
490        this.domainSocketFactory = new DomainSocketFactory(dfsClientConf);
491    
492        String localInterfaces[] =
493          conf.getTrimmedStrings(DFSConfigKeys.DFS_CLIENT_LOCAL_INTERFACES);
494        localInterfaceAddrs = getLocalInterfaceAddrs(localInterfaces);
495        if (LOG.isDebugEnabled() && 0 != localInterfaces.length) {
496          LOG.debug("Using local interfaces [" +
497          Joiner.on(',').join(localInterfaces)+ "] with addresses [" +
498          Joiner.on(',').join(localInterfaceAddrs) + "]");
499        }
500        
501        this.peerCache = PeerCache.getInstance(dfsClientConf.socketCacheCapacity, dfsClientConf.socketCacheExpiry);
502      }
503    
504      /**
505       * Return the socket addresses to use with each configured
506       * local interface. Local interfaces may be specified by IP
507       * address, IP address range using CIDR notation, interface
508       * name (e.g. eth0) or sub-interface name (e.g. eth0:0).
509       * The socket addresses consist of the IPs for the interfaces
510       * and the ephemeral port (port 0). If an IP, IP range, or
511       * interface name matches an interface with sub-interfaces
512       * only the IP of the interface is used. Sub-interfaces can
513       * be used by specifying them explicitly (by IP or name).
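          *
          * A hedged configuration sketch (the interface names and subnet below are
          * illustrative; the key is {@code DFSConfigKeys.DFS_CLIENT_LOCAL_INTERFACES}):
          * <pre>{@code
          * conf.setStrings(DFSConfigKeys.DFS_CLIENT_LOCAL_INTERFACES,
          *     "eth0", "eth1:0", "10.1.2.0/24");
          * }</pre>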
514       * 
515       * @return SocketAddresses for the configured local interfaces,
516       *    or an empty array if none are configured
517       * @throws UnknownHostException if a given interface name is invalid
518       */
519      private static SocketAddress[] getLocalInterfaceAddrs(
520          String interfaceNames[]) throws UnknownHostException {
521        List<SocketAddress> localAddrs = new ArrayList<SocketAddress>();
522        for (String interfaceName : interfaceNames) {
523          if (InetAddresses.isInetAddress(interfaceName)) {
524            localAddrs.add(new InetSocketAddress(interfaceName, 0));
525          } else if (NetUtils.isValidSubnet(interfaceName)) {
526            for (InetAddress addr : NetUtils.getIPs(interfaceName, false)) {
527              localAddrs.add(new InetSocketAddress(addr, 0));
528            }
529          } else {
530            for (String ip : DNS.getIPs(interfaceName, false)) {
531              localAddrs.add(new InetSocketAddress(ip, 0));
532            }
533          }
534        }
535        return localAddrs.toArray(new SocketAddress[localAddrs.size()]);
536      }
537    
538      /**
539       * Select one of the configured local interfaces at random. We use a random
540       * interface because other policies like round-robin are less effective
541       * given that we cache connections to datanodes.
542       *
543       * @return one of the local interface addresses at random, or null if no
544       *    local interfaces are configured
545       */
546      SocketAddress getRandomLocalInterfaceAddr() {
547        if (localInterfaceAddrs.length == 0) {
548          return null;
549        }
550        final int idx = r.nextInt(localInterfaceAddrs.length);
551        final SocketAddress addr = localInterfaceAddrs[idx];
552        if (LOG.isDebugEnabled()) {
553          LOG.debug("Using local interface " + addr);
554        }
555        return addr;
556      }
557    
558      /**
559       * Return the number of times the client should go back to the namenode
560       * to retrieve block locations when reading.
561       */
562      int getMaxBlockAcquireFailures() {
563        return dfsClientConf.maxBlockAcquireFailures;
564      }
565    
566      /**
567       * Return the timeout that clients should use when writing to datanodes.
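          * The value is the configured datanode write timeout plus
          * {@code WRITE_TIMEOUT_EXTENSION} for each node in the pipeline, or 0
          * (meaning no timeout) when the configured timeout is not positive.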
568       * @param numNodes the number of nodes in the pipeline.
569       */
570      int getDatanodeWriteTimeout(int numNodes) {
571        return (dfsClientConf.confTime > 0) ?
572          (dfsClientConf.confTime + HdfsServerConstants.WRITE_TIMEOUT_EXTENSION * numNodes) : 0;
573      }
574    
575      int getDatanodeReadTimeout(int numNodes) {
576        return dfsClientConf.socketTimeout > 0 ?
577            (HdfsServerConstants.READ_TIMEOUT_EXTENSION * numNodes +
578                dfsClientConf.socketTimeout) : 0;
579      }
580      
581      int getHdfsTimeout() {
582        return dfsClientConf.hdfsTimeout;
583      }
584      
585      String getClientName() {
586        return clientName;
587      }
588    
589      void checkOpen() throws IOException {
590        if (!clientRunning) {
591          throw new IOException("Filesystem closed");
593        }
594      }
595    
596      /** Return the lease renewer instance. The renewer thread won't start
597       *  until the first output stream is created. The same instance will
598       *  be returned until all output streams are closed.
599       */
600      public LeaseRenewer getLeaseRenewer() throws IOException {
601        return LeaseRenewer.getInstance(authority, ugi, this);
602      }
603    
604      /** Get a lease and start automatic renewal */
605      private void beginFileLease(final String src, final DFSOutputStream out) 
606          throws IOException {
607        getLeaseRenewer().put(src, out, this);
608      }
609    
610      /** Stop renewal of lease for the file. */
611      void endFileLease(final String src) throws IOException {
612        getLeaseRenewer().closeFile(src, this);
613      }
614        
615    
616      /** Put a file. Only called from LeaseRenewer, where proper locking is
617       *  enforced to consistently update its local dfsclients array and 
618       *  client's filesBeingWritten map.
619       */
620      void putFileBeingWritten(final String src, final DFSOutputStream out) {
621        synchronized(filesBeingWritten) {
622          filesBeingWritten.put(src, out);
623          // Update the last lease renewal time only when there were no
624          // writes. Once there is one write stream open, the lease renewer
625          // thread keeps it updated well within anyone's expiration time.
626          if (lastLeaseRenewal == 0) {
627            updateLastLeaseRenewal();
628          }
629        }
630      }
631    
632      /** Remove a file. Only called from LeaseRenewer. */
633      void removeFileBeingWritten(final String src) {
634        synchronized(filesBeingWritten) {
635          filesBeingWritten.remove(src);
636          if (filesBeingWritten.isEmpty()) {
637            lastLeaseRenewal = 0;
638          }
639        }
640      }
641    
642      /** Is file-being-written map empty? */
643      boolean isFilesBeingWrittenEmpty() {
644        synchronized(filesBeingWritten) {
645          return filesBeingWritten.isEmpty();
646        }
647      }
648      
649      /** @return true if the client is running */
650      boolean isClientRunning() {
651        return clientRunning;
652      }
653    
654      long getLastLeaseRenewal() {
655        return lastLeaseRenewal;
656      }
657    
658      void updateLastLeaseRenewal() {
659        synchronized(filesBeingWritten) {
660          if (filesBeingWritten.isEmpty()) {
661            return;
662          }
663          lastLeaseRenewal = Time.now();
664        }
665      }
666    
667      /**
668       * Renew leases.
669       * @return true if lease was renewed. May return false if this
670       * client has been closed or has no files open.
671       **/
672      boolean renewLease() throws IOException {
673        if (clientRunning && !isFilesBeingWrittenEmpty()) {
674          try {
675            namenode.renewLease(clientName);
676            updateLastLeaseRenewal();
677            return true;
678          } catch (IOException e) {
679            // Abort if the lease has already expired. 
680            final long elapsed = Time.now() - getLastLeaseRenewal();
681            if (elapsed > HdfsConstants.LEASE_HARDLIMIT_PERIOD) {
682              LOG.warn("Failed to renew lease for " + clientName + " for "
683                  + (elapsed/1000) + " seconds (>= hard-limit ="
684                  + (HdfsConstants.LEASE_HARDLIMIT_PERIOD/1000) + " seconds.) "
685                  + "Closing all files being written ...", e);
686              closeAllFilesBeingWritten(true);
687            } else {
688              // Let the lease renewer handle it and retry.
689              throw e;
690            }
691          }
692        }
693        return false;
694      }
695      
696      /**
697       * Close connections to the NameNode.
698       */
699      void closeConnectionToNamenode() {
700        RPC.stopProxy(namenode);
701      }
702      
703      /** Abort and release resources held.  Ignore all errors. */
704      void abort() {
705        clientRunning = false;
706        closeAllFilesBeingWritten(true);
707    
708        try {
709          // remove the reference to this client and stop the renewer
710          // if there are no more clients under the renewer.
711          getLeaseRenewer().closeClient(this);
712        } catch (IOException ioe) {
713          LOG.info("Exception occurred while aborting the client: " + ioe);
714        }
715        closeConnectionToNamenode();
716      }
717    
718      /** Close/abort all files being written. */
719      private void closeAllFilesBeingWritten(final boolean abort) {
720        for(;;) {
721          final String src;
722          final DFSOutputStream out;
723          synchronized(filesBeingWritten) {
724            if (filesBeingWritten.isEmpty()) {
725              return;
726            }
727            src = filesBeingWritten.keySet().iterator().next();
728            out = filesBeingWritten.remove(src);
729          }
730          if (out != null) {
731            try {
732              if (abort) {
733                out.abort();
734              } else {
735                out.close();
736              }
737            } catch(IOException ie) {
738              LOG.error("Failed to " + (abort? "abort": "close") + " file " + src,
739                  ie);
740            }
741          }
742        }
743      }
744    
745      /**
746       * Close the file system, abandoning all of the leases and files being
747       * created and close connections to the namenode.
748       */
749      @Override
750      public synchronized void close() throws IOException {
751        if(clientRunning) {
752          closeAllFilesBeingWritten(false);
753          clientRunning = false;
754          getLeaseRenewer().closeClient(this);
755          // close connections to the namenode
756          closeConnectionToNamenode();
757        }
758      }
759    
760      /**
761       * Get the default block size for this cluster
762       * @return the default block size in bytes
763       */
764      public long getDefaultBlockSize() {
765        return dfsClientConf.defaultBlockSize;
766      }
767        
768      /**
769       * @see ClientProtocol#getPreferredBlockSize(String)
770       */
771      public long getBlockSize(String f) throws IOException {
772        try {
773          return namenode.getPreferredBlockSize(f);
774        } catch (IOException ie) {
775          LOG.warn("Problem getting block size", ie);
776          throw ie;
777        }
778      }
779    
780      /**
781       * Get server default values for a number of configuration params.
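          * The values are fetched from the namenode at most once every
          * {@code SERVER_DEFAULTS_VALIDITY_PERIOD} milliseconds and cached in between.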
782       * @see ClientProtocol#getServerDefaults()
783       */
784      public FsServerDefaults getServerDefaults() throws IOException {
785        long now = Time.now();
786        if (now - serverDefaultsLastUpdate > SERVER_DEFAULTS_VALIDITY_PERIOD) {
787          serverDefaults = namenode.getServerDefaults();
788          serverDefaultsLastUpdate = now;
789        }
790        return serverDefaults;
791      }
792      
793      /**
794       * Get a canonical token service name for this client's tokens.  Null should
795       * be returned if the client is not using tokens.
796       * @return the token service for the client
797       */
798      @InterfaceAudience.LimitedPrivate( { "HDFS" }) 
799      public String getCanonicalServiceName() {
800        return (dtService != null) ? dtService.toString() : null;
801      }
802      
803      /**
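          * Get a delegation token for the current user from the namenode. A hedged
          * usage sketch (the renewer name is illustrative):
          * <pre>{@code
          * Token<DelegationTokenIdentifier> token =
          *     dfsClient.getDelegationToken(new Text("yarn"));
          * }</pre>
          *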
804       * @see ClientProtocol#getDelegationToken(Text)
805       */
806      public Token<DelegationTokenIdentifier> getDelegationToken(Text renewer)
807          throws IOException {
808        assert dtService != null;
809        Token<DelegationTokenIdentifier> token =
810          namenode.getDelegationToken(renewer);
811        token.setService(this.dtService);
812    
813        LOG.info("Created " + DelegationTokenIdentifier.stringifyToken(token));
814        return token;
815      }
816    
817      /**
818       * Renew a delegation token
819       * @param token the token to renew
820       * @return the new expiration time
821       * @throws InvalidToken
822       * @throws IOException
823       * @deprecated Use Token.renew instead.
824       */
825      @Deprecated
826      public long renewDelegationToken(Token<DelegationTokenIdentifier> token)
827          throws InvalidToken, IOException {
828        LOG.info("Renewing " + DelegationTokenIdentifier.stringifyToken(token));
829        try {
830          return token.renew(conf);
831        } catch (InterruptedException ie) {                                       
832          throw new RuntimeException("caught interrupted", ie);
833        } catch (RemoteException re) {
834          throw re.unwrapRemoteException(InvalidToken.class,
835                                         AccessControlException.class);
836        }
837      }
838      
839      private static Map<String, Boolean> localAddrMap = Collections
840          .synchronizedMap(new HashMap<String, Boolean>());
841      
842      static boolean isLocalAddress(InetSocketAddress targetAddr) {
843        InetAddress addr = targetAddr.getAddress();
844        Boolean cached = localAddrMap.get(addr.getHostAddress());
845        if (cached != null) {
846          if (LOG.isTraceEnabled()) {
847            LOG.trace("Address " + targetAddr +
848                      (cached ? " is local" : " is not local"));
849          }
850          return cached;
851        }
852        
853        boolean local = NetUtils.isLocalAddress(addr);
854    
855        if (LOG.isTraceEnabled()) {
856          LOG.trace("Address " + targetAddr +
857                    (local ? " is local" : " is not local"));
858        }
859        localAddrMap.put(addr.getHostAddress(), local);
860        return local;
861      }
862      
863      /**
864       * Should the block access token be refetched on an exception
865       * 
866       * @param ex Exception received
867       * @param targetAddr Target datanode address from where exception was received
868       * @return true if the block access token has expired or is invalid and it
869       *         should be refetched
870       */
871      private static boolean tokenRefetchNeeded(IOException ex,
872          InetSocketAddress targetAddr) {
873        /*
874         * Get a new access token and retry. Retry is needed in 2 cases. 1) When
875         * both NN and DN re-started while DFSClient holding a cached access token.
876         * 2) In the case that NN fails to update its access key at pre-set interval
877         * (by a wide margin) and subsequently restarts. In this case, DN
878         * re-registers itself with NN and receives a new access key, but DN will
879         * delete the old access key from its memory since it's considered expired
880         * based on the estimated expiration date.
881         */
882        if (ex instanceof InvalidBlockTokenException || ex instanceof InvalidToken) {
883          LOG.info("Access token was invalid when connecting to " + targetAddr
884              + " : " + ex);
885          return true;
886        }
887        return false;
888      }
889      
890      /**
891       * Cancel a delegation token
892       * @param token the token to cancel
893       * @throws InvalidToken
894       * @throws IOException
895       * @deprecated Use Token.cancel instead.
896       */
897      @Deprecated
898      public void cancelDelegationToken(Token<DelegationTokenIdentifier> token)
899          throws InvalidToken, IOException {
900        LOG.info("Cancelling " + DelegationTokenIdentifier.stringifyToken(token));
901        try {
902          token.cancel(conf);
903        } catch (InterruptedException ie) {
904          throw new RuntimeException("caught interrupted", ie);
905        } catch (RemoteException re) {
906          throw re.unwrapRemoteException(InvalidToken.class,
907                                         AccessControlException.class);
908        }
909      }
910      
911      @InterfaceAudience.Private
912      public static class Renewer extends TokenRenewer {
913        
914        static {
915          // Ensure that HDFS configuration files are loaded before trying to use
916          // the renewer.
917          HdfsConfiguration.init();
918        }
919        
920        @Override
921        public boolean handleKind(Text kind) {
922          return DelegationTokenIdentifier.HDFS_DELEGATION_KIND.equals(kind);
923        }
924    
925        @SuppressWarnings("unchecked")
926        @Override
927        public long renew(Token<?> token, Configuration conf) throws IOException {
928          Token<DelegationTokenIdentifier> delToken = 
929            (Token<DelegationTokenIdentifier>) token;
930          ClientProtocol nn = getNNProxy(delToken, conf);
931          try {
932            return nn.renewDelegationToken(delToken);
933          } catch (RemoteException re) {
934            throw re.unwrapRemoteException(InvalidToken.class, 
935                                           AccessControlException.class);
936          }
937        }
938    
939        @SuppressWarnings("unchecked")
940        @Override
941        public void cancel(Token<?> token, Configuration conf) throws IOException {
942          Token<DelegationTokenIdentifier> delToken = 
943              (Token<DelegationTokenIdentifier>) token;
944          LOG.info("Cancelling " + 
945                   DelegationTokenIdentifier.stringifyToken(delToken));
946          ClientProtocol nn = getNNProxy(delToken, conf);
947          try {
948            nn.cancelDelegationToken(delToken);
949          } catch (RemoteException re) {
950            throw re.unwrapRemoteException(InvalidToken.class,
951                AccessControlException.class);
952          }
953        }
954        
955        private static ClientProtocol getNNProxy(
956            Token<DelegationTokenIdentifier> token, Configuration conf)
957            throws IOException {
958          URI uri = HAUtil.getServiceUriFromToken(token);
959          if (HAUtil.isTokenForLogicalUri(token) &&
960              !HAUtil.isLogicalUri(conf, uri)) {
961            // If the token is for a logical nameservice, but the configuration
962            // we have disagrees about that, we can't actually renew it.
963            // This can be the case in MR, for example, if the RM doesn't
964            // have all of the HA clusters configured in its configuration.
965            throw new IOException("Unable to map logical nameservice URI '" +
966                uri + "' to a NameNode. Local configuration does not have " +
967                "a failover proxy provider configured.");
968          }
969          
970          NameNodeProxies.ProxyAndInfo<ClientProtocol> info =
971            NameNodeProxies.createProxy(conf, uri, ClientProtocol.class);
972          assert info.getDelegationTokenService().equals(token.getService()) :
973            "Returned service '" + info.getDelegationTokenService().toString() +
974            "' doesn't match expected service '" +
975            token.getService().toString() + "'";
976            
977          return info.getProxy();
978        }
979    
980        @Override
981        public boolean isManaged(Token<?> token) throws IOException {
982          return true;
983        }
984        
985      }
986    
987      /**
988       * Report corrupt blocks that were discovered by the client.
989       * @see ClientProtocol#reportBadBlocks(LocatedBlock[])
990       */
991      public void reportBadBlocks(LocatedBlock[] blocks) throws IOException {
992        namenode.reportBadBlocks(blocks);
993      }
994      
995      public short getDefaultReplication() {
996        return dfsClientConf.defaultReplication;
997      }
998      
999      public LocatedBlocks getLocatedBlocks(String src, long start)
1000          throws IOException {
1001        return getLocatedBlocks(src, start, dfsClientConf.prefetchSize);
1002      }
1003    
1004      /*
1005       * This is just a wrapper around callGetBlockLocations, but non-static so that
1006       * we can stub it out for tests.
1007       */
1008      @VisibleForTesting
1009      public LocatedBlocks getLocatedBlocks(String src, long start, long length)
1010          throws IOException {
1011        return callGetBlockLocations(namenode, src, start, length);
1012      }
1013    
1014      /**
1015       * @see ClientProtocol#getBlockLocations(String, long, long)
1016       */
1017      static LocatedBlocks callGetBlockLocations(ClientProtocol namenode,
1018          String src, long start, long length) 
1019          throws IOException {
1020        try {
1021          return namenode.getBlockLocations(src, start, length);
1022        } catch(RemoteException re) {
1023          throw re.unwrapRemoteException(AccessControlException.class,
1024                                         FileNotFoundException.class,
1025                                         UnresolvedPathException.class);
1026        }
1027      }
1028    
1029      /**
1030       * Recover a file's lease
1031       * @param src a file's path
1032       * @return true if the file is already closed
1033       * @throws IOException
1034       */
1035      boolean recoverLease(String src) throws IOException {
1036        checkOpen();
1037    
1038        try {
1039          return namenode.recoverLease(src, clientName);
1040        } catch (RemoteException re) {
1041          throw re.unwrapRemoteException(FileNotFoundException.class,
1042                                         AccessControlException.class,
1043                                         UnresolvedPathException.class);
1044        }
1045      }
1046    
1047      /**
1048       * Get block location info about file
1049       * 
1050       * getBlockLocations() returns a list of hostnames that store 
1051       * data for a specific file region.  It returns a set of hostnames
1052       * for every block within the indicated region.
1053       *
1054       * This function is very useful when writing code that considers
1055       * data-placement when performing operations.  For example, the
1056       * MapReduce system tries to schedule tasks on the same machines
1057       * as the data-block the task processes. 
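           *
           * A minimal sketch (the path and byte range are illustrative):
           * <pre>{@code
           * BlockLocation[] locs = dfsClient.getBlockLocations("/data/part-0", 0, length);
           * for (BlockLocation loc : locs) {
           *   String[] hosts = loc.getHosts(); // datanodes holding replicas of this block
           * }
           * }</pre>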
1058       */
1059      public BlockLocation[] getBlockLocations(String src, long start, 
1060        long length) throws IOException, UnresolvedLinkException {
1061        LocatedBlocks blocks = getLocatedBlocks(src, start, length);
1062        BlockLocation[] locations =  DFSUtil.locatedBlocks2Locations(blocks);
1063        HdfsBlockLocation[] hdfsLocations = new HdfsBlockLocation[locations.length];
1064        for (int i = 0; i < locations.length; i++) {
1065          hdfsLocations[i] = new HdfsBlockLocation(locations[i], blocks.get(i));
1066        }
1067        return hdfsLocations;
1068      }
1069      
1070      /**
1071       * Get block location information about a list of {@link HdfsBlockLocation}.
1072       * Used by {@link DistributedFileSystem#getFileBlockStorageLocations(List)} to
1073       * get {@link BlockStorageLocation}s for blocks returned by
1074       * {@link DistributedFileSystem#getFileBlockLocations(org.apache.hadoop.fs.FileStatus, long, long)}
1075       * .
1076       * 
1077       * This is done by making a round of RPCs to the associated datanodes, asking
1078       * for the volume of each block replica. The returned array of
1079       * {@link BlockStorageLocation} exposes this information as a
1080       * {@link VolumeId}.
1081       * 
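           * A hedged usage sketch (assumes the feature is enabled via
           * {@code DFSConfigKeys.DFS_HDFS_BLOCKS_METADATA_ENABLED} on both the client
           * and the datanodes; {@code src} and {@code len} are illustrative):
           * <pre>{@code
           * BlockLocation[] locs = dfsClient.getBlockLocations(src, 0, len);
           * BlockStorageLocation[] volumeLocs =
           *     dfsClient.getBlockStorageLocations(Arrays.asList(locs));
           * }</pre>
           *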
1082       * @param blockLocations
1083       *          target blocks on which to query volume location information
1084       * @return volumeBlockLocations original block array augmented with additional
1085       *         volume location information for each replica.
1086       */
1087      public BlockStorageLocation[] getBlockStorageLocations(
1088          List<BlockLocation> blockLocations) throws IOException,
1089          UnsupportedOperationException, InvalidBlockTokenException {
1090        if (!getConf().getHdfsBlocksMetadataEnabled) {
1091          throw new UnsupportedOperationException("Datanode-side support for " +
1092            "getBlockStorageLocations() must also be enabled in the client " +
1093              "configuration.");
1094        }
1095        // Downcast blockLocations and fetch out required LocatedBlock(s)
1096        List<LocatedBlock> blocks = new ArrayList<LocatedBlock>();
1097        for (BlockLocation loc : blockLocations) {
1098          if (!(loc instanceof HdfsBlockLocation)) {
1099            throw new ClassCastException("DFSClient#getBlockStorageLocations " +
1100                "expected to be passed HdfsBlockLocations");
1101          }
1102          HdfsBlockLocation hdfsLoc = (HdfsBlockLocation) loc;
1103          blocks.add(hdfsLoc.getLocatedBlock());
1104        }
1105        
1106        // Re-group the LocatedBlocks by datanode, with the value for each datanode
1107        // being the list of LocatedBlocks it stores.
1108        Map<DatanodeInfo, List<LocatedBlock>> datanodeBlocks = 
1109            new LinkedHashMap<DatanodeInfo, List<LocatedBlock>>();
1110        for (LocatedBlock b : blocks) {
1111          for (DatanodeInfo info : b.getLocations()) {
1112            if (!datanodeBlocks.containsKey(info)) {
1113              datanodeBlocks.put(info, new ArrayList<LocatedBlock>());
1114            }
1115            List<LocatedBlock> l = datanodeBlocks.get(info);
1116            l.add(b);
1117          }
1118        }
1119            
1120        // Make RPCs to the datanodes to get volume locations for the replicas they host
1121        List<HdfsBlocksMetadata> metadatas = BlockStorageLocationUtil
1122            .queryDatanodesForHdfsBlocksMetadata(conf, datanodeBlocks,
1123                getConf().getFileBlockStorageLocationsNumThreads,
1124                getConf().getFileBlockStorageLocationsTimeout,
1125                getConf().connectToDnViaHostname);
1126        
1127        // Regroup the returned VolumeId metadata to again be grouped by
1128        // LocatedBlock rather than by datanode
1129        Map<LocatedBlock, List<VolumeId>> blockVolumeIds = BlockStorageLocationUtil
1130            .associateVolumeIdsWithBlocks(blocks, datanodeBlocks, metadatas);
1131        
1132        // Combine original BlockLocations with new VolumeId information
1133        BlockStorageLocation[] volumeBlockLocations = BlockStorageLocationUtil
1134            .convertToVolumeBlockLocations(blocks, blockVolumeIds);
1135    
1136        return volumeBlockLocations;
1137      }
1138      
1139      public DFSInputStream open(String src) 
1140          throws IOException, UnresolvedLinkException {
1141        return open(src, dfsClientConf.ioBufferSize, true, null);
1142      }
1143    
1144      /**
1145       * Create an input stream that obtains a nodelist from the
1146       * namenode, and then reads from all the right places.  Creates an
1147       * inner subclass of InputStream that does the right out-of-band
1148       * work.
1149       * @deprecated Use {@link #open(String, int, boolean)} instead.
1150       */
1151      @Deprecated
1152      public DFSInputStream open(String src, int buffersize, boolean verifyChecksum,
1153                                 FileSystem.Statistics stats)
1154          throws IOException, UnresolvedLinkException {
1155        return open(src, buffersize, verifyChecksum);
1156      }
1157      
1158    
1159      /**
1160       * Create an input stream that obtains a nodelist from the
1161       * namenode, and then reads from all the right places.  Creates an
1162       * inner subclass of InputStream that does the right out-of-band
1163       * work.
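           *
           * A minimal read sketch (the path and buffer size are illustrative):
           * <pre>{@code
           * DFSInputStream in = dfsClient.open("/data/part-0", 4096, true);
           * try {
           *   byte[] buf = new byte[4096];
           *   for (int n = in.read(buf); n > 0; n = in.read(buf)) {
           *     // consume buf[0..n)
           *   }
           * } finally {
           *   in.close();
           * }
           * }</pre>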
1164       */
1165      public DFSInputStream open(String src, int buffersize, boolean verifyChecksum)
1166          throws IOException, UnresolvedLinkException {
1167        checkOpen();
1168        // Get block info from the namenode
1169        return new DFSInputStream(this, src, buffersize, verifyChecksum);
1170      }
1171    
1172      /**
1173       * Get the namenode associated with this DFSClient object
1174       * @return the namenode associated with this DFSClient object
1175       */
1176      public ClientProtocol getNamenode() {
1177        return namenode;
1178      }
1179      
1180      /**
1181       * Call {@link #create(String, boolean, short, long, Progressable)} with
1182       * default <code>replication</code> and <code>blockSize</code> and null
1183       * <code>progress</code>.
1184       */
1185      public OutputStream create(String src, boolean overwrite) 
1186          throws IOException {
1187        return create(src, overwrite, dfsClientConf.defaultReplication,
1188            dfsClientConf.defaultBlockSize, null);
1189      }
1190        
1191      /**
1192       * Call {@link #create(String, boolean, short, long, Progressable)} with
1193       * default <code>replication</code> and <code>blockSize</code>.
1194       */
1195      public OutputStream create(String src, 
1196                                 boolean overwrite,
1197                                 Progressable progress) throws IOException {
1198        return create(src, overwrite, dfsClientConf.defaultReplication,
1199            dfsClientConf.defaultBlockSize, progress);
1200      }
1201        
1202      /**
1203       * Call {@link #create(String, boolean, short, long, Progressable)} with
1204       * null <code>progress</code>.
1205       */
1206      public OutputStream create(String src, 
1207                                 boolean overwrite, 
1208                                 short replication,
1209                                 long blockSize) throws IOException {
1210        return create(src, overwrite, replication, blockSize, null);
1211      }
1212    
1213      /**
1214       * Call {@link #create(String, boolean, short, long, Progressable, int)}
1215       * with default bufferSize.
1216       */
1217      public OutputStream create(String src, boolean overwrite, short replication,
1218          long blockSize, Progressable progress) throws IOException {
1219        return create(src, overwrite, replication, blockSize, progress,
1220            dfsClientConf.ioBufferSize);
1221      }
1222    
1223      /**
1224       * Call {@link #create(String, FsPermission, EnumSet, short, long, 
1225       * Progressable, int, ChecksumOpt)} with default <code>permission</code>
1226       * {@link FsPermission#getFileDefault()}.
1227       * 
1228       * @param src File name
1229       * @param overwrite overwrite an existing file if true
1230       * @param replication replication factor for the file
1231       * @param blockSize maximum block size
1232       * @param progress interface for reporting client progress
1233       * @param buffersize underlying buffersize
1234       * 
1235       * @return output stream
1236       */
1237      public OutputStream create(String src,
1238                                 boolean overwrite,
1239                                 short replication,
1240                                 long blockSize,
1241                                 Progressable progress,
1242                                 int buffersize)
1243          throws IOException {
1244        return create(src, FsPermission.getFileDefault(),
1245            overwrite ? EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE)
1246                : EnumSet.of(CreateFlag.CREATE), replication, blockSize, progress,
1247            buffersize, null);
1248      }
1249    
1250      /**
1251       * Call {@link #create(String, FsPermission, EnumSet, boolean, short, 
1252       * long, Progressable, int, ChecksumOpt)} with <code>createParent</code>
1253       *  set to true.
1254       */
1255      public DFSOutputStream create(String src, 
1256                                 FsPermission permission,
1257                                 EnumSet<CreateFlag> flag, 
1258                                 short replication,
1259                                 long blockSize,
1260                                 Progressable progress,
1261                                 int buffersize,
1262                                 ChecksumOpt checksumOpt)
1263          throws IOException {
1264        return create(src, permission, flag, true,
1265            replication, blockSize, progress, buffersize, checksumOpt, null);
1266      }
1267    
1268      /**
1269       * Create a new dfs file with the specified block replication 
1270       * with write-progress reporting and return an output stream for writing
1271       * into the file.  
1272       * 
1273       * @param src File name
1274       * @param permission The permission of the file being created.
1275       *          If null, use default permission {@link FsPermission#getFileDefault()}
1276       * @param flag indicates create a new file or create/overwrite an
1277       *          existing file or append to an existing file
1278       * @param createParent create missing parent directory if true
1279       * @param replication block replication
1280       * @param blockSize maximum block size
1281       * @param progress interface for reporting client progress
1282       * @param buffersize underlying buffer size 
1283       * @param checksumOpt checksum options
1284       * 
1285       * @return output stream
1286       * 
1287       * @see ClientProtocol#create(String, FsPermission, String, EnumSetWritable,
1288       * boolean, short, long) for detailed description of exceptions thrown
1289       */
1290      public DFSOutputStream create(String src, 
1291                                 FsPermission permission,
1292                                 EnumSet<CreateFlag> flag, 
1293                                 boolean createParent,
1294                                 short replication,
1295                                 long blockSize,
1296                                 Progressable progress,
1297                                 int buffersize,
1298                                 ChecksumOpt checksumOpt) throws IOException {
1299        return create(src, permission, flag, createParent, replication, blockSize, 
1300            progress, buffersize, checksumOpt, null);
1301      }
1302    
1303      /**
1304       * Same as {@link #create(String, FsPermission, EnumSet, boolean, short, long,
1305       * Progressable, int, ChecksumOpt)} with the addition of favoredNodes, which
1306       * is a hint to the namenode about where to place the file's blocks.
1307       * The favored nodes hint is not persisted in HDFS, so it is only honored
1308       * at creation time; HDFS may later move blocks off the favored nodes
1309       * during balancing or re-replication. A value of null means
1310       * no favored nodes for this create.
1311       */
1312      public DFSOutputStream create(String src, 
1313                                 FsPermission permission,
1314                                 EnumSet<CreateFlag> flag, 
1315                                 boolean createParent,
1316                                 short replication,
1317                                 long blockSize,
1318                                 Progressable progress,
1319                                 int buffersize,
1320                                 ChecksumOpt checksumOpt,
1321                                 InetSocketAddress[] favoredNodes) throws IOException {
1322        checkOpen();
1323        if (permission == null) {
1324          permission = FsPermission.getFileDefault();
1325        }
1326        FsPermission masked = permission.applyUMask(dfsClientConf.uMask);
1327        if(LOG.isDebugEnabled()) {
1328          LOG.debug(src + ": masked=" + masked);
1329        }
1330        String[] favoredNodeStrs = null;
1331        if (favoredNodes != null) {
1332          favoredNodeStrs = new String[favoredNodes.length];
1333          for (int i = 0; i < favoredNodes.length; i++) {
1334            favoredNodeStrs[i] = 
1335                favoredNodes[i].getHostName() + ":" 
1336                             + favoredNodes[i].getPort();
1337          }
1338        }
1339        final DFSOutputStream result = DFSOutputStream.newStreamForCreate(this,
1340            src, masked, flag, createParent, replication, blockSize, progress,
1341            buffersize, dfsClientConf.createChecksum(checksumOpt), favoredNodeStrs);
1342        beginFileLease(src, result);
1343        return result;
1344      }
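  /*
   * Usage sketch: creating (or overwriting) a file through the simple create
   * overload and writing a few bytes. The client instance and the path
   * "/tmp/demo.txt" are assumed purely for illustration.
   *
   *   OutputStream out = client.create("/tmp/demo.txt", true); // overwrite if it exists
   *   try {
   *     out.write("hello".getBytes());
   *   } finally {
   *     out.close();   // closing completes the file on the namenode
   *   }
   */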
1345      
1346      /**
1347       * Append to an existing file if {@link CreateFlag#APPEND} is present
1348       */
1349      private DFSOutputStream primitiveAppend(String src, EnumSet<CreateFlag> flag,
1350          int buffersize, Progressable progress) throws IOException {
1351        if (flag.contains(CreateFlag.APPEND)) {
1352          HdfsFileStatus stat = getFileInfo(src);
1353          if (stat == null) { // No file to append to
1354            // New file needs to be created if create option is present
1355            if (!flag.contains(CreateFlag.CREATE)) {
1356              throw new FileNotFoundException("failed to append to non-existent file "
1357                  + src + " on client " + clientName);
1358            }
1359            return null;
1360          }
1361          return callAppend(stat, src, buffersize, progress);
1362        }
1363        return null;
1364      }
1365      
1366      /**
1367       * Same as {@link #create(String, FsPermission, EnumSet, short, long,
1368       *  Progressable, int, ChecksumOpt)} except that the permission
1369       *  is absolute (i.e. it has already been masked with the umask).
1370       */
1371      public DFSOutputStream primitiveCreate(String src, 
1372                                 FsPermission absPermission,
1373                                 EnumSet<CreateFlag> flag,
1374                                 boolean createParent,
1375                                 short replication,
1376                                 long blockSize,
1377                                 Progressable progress,
1378                                 int buffersize,
1379                                 ChecksumOpt checksumOpt)
1380          throws IOException, UnresolvedLinkException {
1381        checkOpen();
1382        CreateFlag.validate(flag);
1383        DFSOutputStream result = primitiveAppend(src, flag, buffersize, progress);
1384        if (result == null) {
1385          DataChecksum checksum = dfsClientConf.createChecksum(checksumOpt);
1386          result = DFSOutputStream.newStreamForCreate(this, src, absPermission,
1387              flag, createParent, replication, blockSize, progress, buffersize,
1388              checksum);
1389        }
1390        beginFileLease(src, result);
1391        return result;
1392      }
1393      
1394      /**
1395       * Creates a symbolic link.
1396       * 
1397       * @see ClientProtocol#createSymlink(String, String,FsPermission, boolean) 
1398       */
1399      public void createSymlink(String target, String link, boolean createParent)
1400          throws IOException {
1401        try {
1402          FsPermission dirPerm = 
1403              FsPermission.getDefault().applyUMask(dfsClientConf.uMask); 
1404          namenode.createSymlink(target, link, dirPerm, createParent);
1405        } catch (RemoteException re) {
1406          throw re.unwrapRemoteException(AccessControlException.class,
1407                                         FileAlreadyExistsException.class, 
1408                                         FileNotFoundException.class,
1409                                         ParentNotDirectoryException.class,
1410                                         NSQuotaExceededException.class, 
1411                                         DSQuotaExceededException.class,
1412                                         UnresolvedPathException.class,
1413                                         SnapshotAccessControlException.class);
1414        }
1415      }
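  /*
   * Usage sketch: creating a symlink and then resolving its first target with
   * getLinkTarget(String) below. The paths are assumed purely for illustration.
   *
   *   client.createSymlink("/data/releases/2013-06", "/data/current", true);
   *   String target = client.getLinkTarget("/data/current");  // "/data/releases/2013-06"
   */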
1416    
1417      /**
1418       * Resolve the *first* symlink, if any, in the path.
1419       * 
1420       * @see ClientProtocol#getLinkTarget(String)
1421       */
1422      public String getLinkTarget(String path) throws IOException { 
1423        checkOpen();
1424        try {
1425          return namenode.getLinkTarget(path);
1426        } catch (RemoteException re) {
1427          throw re.unwrapRemoteException(AccessControlException.class,
1428                                         FileNotFoundException.class);
1429        }
1430      }
1431    
1432      /** Method to get stream returned by append call */
1433      private DFSOutputStream callAppend(HdfsFileStatus stat, String src,
1434          int buffersize, Progressable progress) throws IOException {
1435        LocatedBlock lastBlock = null;
1436        try {
1437          lastBlock = namenode.append(src, clientName);
1438        } catch(RemoteException re) {
1439          throw re.unwrapRemoteException(AccessControlException.class,
1440                                         FileNotFoundException.class,
1441                                         SafeModeException.class,
1442                                         DSQuotaExceededException.class,
1443                                         UnsupportedOperationException.class,
1444                                         UnresolvedPathException.class,
1445                                         SnapshotAccessControlException.class);
1446        }
1447        return DFSOutputStream.newStreamForAppend(this, src, buffersize, progress,
1448            lastBlock, stat, dfsClientConf.createChecksum());
1449      }
1450      
1451      /**
1452       * Append to an existing HDFS file.  
1453       * 
1454       * @param src file name
1455       * @param buffersize buffer size
1456       * @param progress for reporting write-progress; null is acceptable.
1457       * @param statistics file system statistics; null is acceptable.
1458       * @return an output stream for writing into the file
1459       * 
1460       * @see ClientProtocol#append(String, String) 
1461       */
1462      public HdfsDataOutputStream append(final String src, final int buffersize,
1463          final Progressable progress, final FileSystem.Statistics statistics
1464          ) throws IOException {
1465        final DFSOutputStream out = append(src, buffersize, progress);
1466        return new HdfsDataOutputStream(out, statistics, out.getInitialLen());
1467      }
1468    
1469      private DFSOutputStream append(String src, int buffersize, Progressable progress) 
1470          throws IOException {
1471        checkOpen();
1472        HdfsFileStatus stat = getFileInfo(src);
1473        if (stat == null) { // No file found
1474          throw new FileNotFoundException("failed to append to non-existent file "
1475              + src + " on client " + clientName);
1476        }
1477        final DFSOutputStream result = callAppend(stat, src, buffersize, progress);
1478        beginFileLease(src, result);
1479        return result;
1480      }
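  /*
   * Usage sketch: appending to an existing file through the public append()
   * above; per its javadoc, null is acceptable for both the progress callback
   * and the statistics object. The client and path are assumed purely for
   * illustration.
   *
   *   HdfsDataOutputStream out = client.append("/logs/app.log", 4096, null, null);
   *   try {
   *     out.write("one more line\n".getBytes());
   *   } finally {
   *     out.close();
   *   }
   */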
1481    
1482      /**
1483       * Set replication for an existing file.
1484       * @param src file name
1485       * @param replication the replication factor to set
1486       * 
1487       * @see ClientProtocol#setReplication(String, short)
1488       */
1489      public boolean setReplication(String src, short replication)
1490          throws IOException {
1491        try {
1492          return namenode.setReplication(src, replication);
1493        } catch(RemoteException re) {
1494          throw re.unwrapRemoteException(AccessControlException.class,
1495                                         FileNotFoundException.class,
1496                                         SafeModeException.class,
1497                                         DSQuotaExceededException.class,
1498                                         UnresolvedPathException.class,
1499                                         SnapshotAccessControlException.class);
1500        }
1501      }
1502    
1503      /**
1504       * Rename file or directory.
1505       * @see ClientProtocol#rename(String, String)
1506       * @deprecated Use {@link #rename(String, String, Options.Rename...)} instead.
1507       */
1508      @Deprecated
1509      public boolean rename(String src, String dst) throws IOException {
1510        checkOpen();
1511        try {
1512          return namenode.rename(src, dst);
1513        } catch(RemoteException re) {
1514          throw re.unwrapRemoteException(AccessControlException.class,
1515                                         NSQuotaExceededException.class,
1516                                         DSQuotaExceededException.class,
1517                                         UnresolvedPathException.class,
1518                                         SnapshotAccessControlException.class);
1519        }
1520      }
1521    
1522      /**
1523       * Move blocks from srcs to trg and delete srcs.
1524       * See {@link ClientProtocol#concat(String, String [])}. 
1525       */
1526      public void concat(String trg, String [] srcs) throws IOException {
1527        checkOpen();
1528        try {
1529          namenode.concat(trg, srcs);
1530        } catch(RemoteException re) {
1531          throw re.unwrapRemoteException(AccessControlException.class,
1532                                         UnresolvedPathException.class,
1533                                         SnapshotAccessControlException.class);
1534        }
1535      }
1536      /**
1537       * Rename file or directory.
1538       * @see ClientProtocol#rename2(String, String, Options.Rename...)
1539       */
1540      public void rename(String src, String dst, Options.Rename... options)
1541          throws IOException {
1542        checkOpen();
1543        try {
1544          namenode.rename2(src, dst, options);
1545        } catch(RemoteException re) {
1546          throw re.unwrapRemoteException(AccessControlException.class,
1547                                         DSQuotaExceededException.class,
1548                                         FileAlreadyExistsException.class,
1549                                         FileNotFoundException.class,
1550                                         ParentNotDirectoryException.class,
1551                                         SafeModeException.class,
1552                                         NSQuotaExceededException.class,
1553                                         UnresolvedPathException.class,
1554                                         SnapshotAccessControlException.class);
1555        }
1556      }
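  /*
   * Usage sketch: renaming with the Options.Rename variant above, which can
   * replace an existing destination when Options.Rename.OVERWRITE is passed.
   * Paths are assumed purely for illustration.
   *
   *   client.rename("/tmp/report.tmp", "/reports/report.csv", Options.Rename.OVERWRITE);
   */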
1557      /**
1558       * Delete file or directory.
1559       * See {@link ClientProtocol#delete(String, boolean)}. 
1560       */
1561      @Deprecated
1562      public boolean delete(String src) throws IOException {
1563        checkOpen();
1564        return namenode.delete(src, true);
1565      }
1566    
1567      /**
1568       * Delete a file or directory.
1569       * Deletes the contents of the directory if it is non-empty and recursive
1570       * is set to true.
1571       *
1572       * @see ClientProtocol#delete(String, boolean)
1573       */
1574      public boolean delete(String src, boolean recursive) throws IOException {
1575        checkOpen();
1576        try {
1577          return namenode.delete(src, recursive);
1578        } catch(RemoteException re) {
1579          throw re.unwrapRemoteException(AccessControlException.class,
1580                                         FileNotFoundException.class,
1581                                         SafeModeException.class,
1582                                         UnresolvedPathException.class,
1583                                         SnapshotAccessControlException.class);
1584        }
1585      }
1586      
1587      /** Check whether a path exists.  Implemented using getFileInfo(src).
1588       */
1589      public boolean exists(String src) throws IOException {
1590        checkOpen();
1591        return getFileInfo(src) != null;
1592      }
1593    
1594      /**
1595       * Get a partial listing of the indicated directory.
1596       * No block locations need to be fetched.
1597       */
1598      public DirectoryListing listPaths(String src,  byte[] startAfter)
1599        throws IOException {
1600        return listPaths(src, startAfter, false);
1601      }
1602      
1603      /**
1604       * Get a partial listing of the indicated directory
1605       *
1606       * It is recommended to use HdfsFileStatus.EMPTY_NAME as startAfter
1607       * if the application wants to fetch a listing starting from
1608       * the first entry in the directory.
1609       *
1610       * @see ClientProtocol#getListing(String, byte[], boolean)
1611       */
1612      public DirectoryListing listPaths(String src,  byte[] startAfter,
1613          boolean needLocation) 
1614        throws IOException {
1615        checkOpen();
1616        try {
1617          return namenode.getListing(src, startAfter, needLocation);
1618        } catch(RemoteException re) {
1619          throw re.unwrapRemoteException(AccessControlException.class,
1620                                         FileNotFoundException.class,
1621                                         UnresolvedPathException.class);
1622        }
1623      }
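  /*
   * Usage sketch: paging through a large directory with the listing methods
   * above, starting from HdfsFileStatus.EMPTY_NAME as recommended. The
   * DirectoryListing accessors shown (getPartialListing, getLastName, hasMore)
   * are assumed here for illustration.
   *
   *   byte[] cookie = HdfsFileStatus.EMPTY_NAME;
   *   DirectoryListing page;
   *   do {
   *     page = client.listPaths("/user/alice", cookie);
   *     for (HdfsFileStatus entry : page.getPartialListing()) {
   *       // process entry
   *     }
   *     cookie = page.getLastName();
   *   } while (page.hasMore());
   */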
1624    
1625      /**
1626       * Get the file info for a specific file or directory.
1627       * @param src The string representation of the path to the file
1628       * @return object containing information regarding the file
1629       *         or null if file not found
1630       *         
1631       * @see ClientProtocol#getFileInfo(String) for description of exceptions
1632       */
1633      public HdfsFileStatus getFileInfo(String src) throws IOException {
1634        checkOpen();
1635        try {
1636          return namenode.getFileInfo(src);
1637        } catch(RemoteException re) {
1638          throw re.unwrapRemoteException(AccessControlException.class,
1639                                         FileNotFoundException.class,
1640                                         UnresolvedPathException.class);
1641        }
1642      }
1643      
1644      /**
1645       * Get the close status of a file.
1646       * @return true if the file is already closed
1647       */
1648      public boolean isFileClosed(String src) throws IOException{
1649        checkOpen();
1650        try {
1651          return namenode.isFileClosed(src);
1652        } catch(RemoteException re) {
1653          throw re.unwrapRemoteException(AccessControlException.class,
1654                                         FileNotFoundException.class,
1655                                         UnresolvedPathException.class);
1656        }
1657      }
1658      
1659      /**
1660       * Get the file info for a specific file or directory. If src
1661       * refers to a symlink then the FileStatus of the link is returned.
1662       * @param src path to a file or directory.
1663       * 
1664       * For description of exceptions thrown 
1665       * @see ClientProtocol#getFileLinkInfo(String)
1666       */
1667      public HdfsFileStatus getFileLinkInfo(String src) throws IOException {
1668        checkOpen();
1669        try {
1670          return namenode.getFileLinkInfo(src);
1671        } catch(RemoteException re) {
1672          throw re.unwrapRemoteException(AccessControlException.class,
1673                                         UnresolvedPathException.class);
1674        }
1675      }
1676    
1677      /**
1678       * Get the checksum of a file.
1679       * @param src The file path
1680       * @return The checksum 
1681       * @see DistributedFileSystem#getFileChecksum(Path)
1682       */
1683      public MD5MD5CRC32FileChecksum getFileChecksum(String src) throws IOException {
1684        checkOpen();
1685        return getFileChecksum(src, clientName, namenode, socketFactory,
1686            dfsClientConf.socketTimeout, getDataEncryptionKey(),
1687            dfsClientConf.connectToDnViaHostname);
1688      }
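  /*
   * Usage sketch: fetching the end-to-end checksum of a file, e.g. to compare
   * two HDFS files without reading their contents. Paths are assumed purely
   * for illustration; the checksums are only comparable when both files were
   * written with the same checksum type and bytes-per-CRC.
   *
   *   MD5MD5CRC32FileChecksum a = client.getFileChecksum("/data/part-00000");
   *   MD5MD5CRC32FileChecksum b = client.getFileChecksum("/backup/part-00000");
   *   boolean same = a.equals(b);
   */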
1689      
1690      @InterfaceAudience.Private
1691      public void clearDataEncryptionKey() {
1692        LOG.debug("Clearing encryption key");
1693        synchronized (this) {
1694          encryptionKey = null;
1695        }
1696      }
1697      
1698      /**
1699       * @return true if data sent between this client and DNs should be encrypted,
1700       *         false otherwise.
1701       * @throws IOException in the event of error communicating with the NN
1702       */
1703      boolean shouldEncryptData() throws IOException {
1704        FsServerDefaults d = getServerDefaults();
1705        return d == null ? false : d.getEncryptDataTransfer();
1706      }
1707      
1708      @InterfaceAudience.Private
1709      public DataEncryptionKey getDataEncryptionKey()
1710          throws IOException {
1711        if (shouldEncryptData()) {
1712          synchronized (this) {
1713            if (encryptionKey == null ||
1714                encryptionKey.expiryDate < Time.now()) {
1715              LOG.debug("Getting new encryption token from NN");
1716              encryptionKey = namenode.getDataEncryptionKey();
1717            }
1718            return encryptionKey;
1719          }
1720        } else {
1721          return null;
1722        }
1723      }
1724    
1725      /**
1726       * Get the checksum of a file.
1727       * @param src The file path
1728       * @param clientName the name of the client requesting the checksum.
1729       * @param namenode the RPC proxy for the namenode
1730       * @param socketFactory to create sockets to connect to DNs
1731       * @param socketTimeout timeout to use when connecting and waiting for a response
1732       * @param encryptionKey the key needed to communicate with DNs in this cluster
1733       * @param connectToDnViaHostname whether the client should use hostnames instead of IPs
1734       * @return The checksum 
1735       */
1736      private static MD5MD5CRC32FileChecksum getFileChecksum(String src,
1737          String clientName,
1738          ClientProtocol namenode, SocketFactory socketFactory, int socketTimeout,
1739          DataEncryptionKey encryptionKey, boolean connectToDnViaHostname)
1740          throws IOException {
1741        //get all block locations
1742        LocatedBlocks blockLocations = callGetBlockLocations(namenode, src, 0, Long.MAX_VALUE);
1743        if (null == blockLocations) {
1744          throw new FileNotFoundException("File does not exist: " + src);
1745        }
1746        List<LocatedBlock> locatedblocks = blockLocations.getLocatedBlocks();
1747        final DataOutputBuffer md5out = new DataOutputBuffer();
1748        int bytesPerCRC = -1;
1749        DataChecksum.Type crcType = DataChecksum.Type.DEFAULT;
1750        long crcPerBlock = 0;
1751        boolean refetchBlocks = false;
1752        int lastRetriedIndex = -1;
1753    
1754        //get block checksum for each block
1755        for(int i = 0; i < locatedblocks.size(); i++) {
1756          if (refetchBlocks) {  // refetch to get fresh tokens
1757            blockLocations = callGetBlockLocations(namenode, src, 0, Long.MAX_VALUE);
1758            if (null == blockLocations) {
1759              throw new FileNotFoundException("File does not exist: " + src);
1760            }
1761            locatedblocks = blockLocations.getLocatedBlocks();
1762            refetchBlocks = false;
1763          }
1764          LocatedBlock lb = locatedblocks.get(i);
1765          final ExtendedBlock block = lb.getBlock();
1766          final DatanodeInfo[] datanodes = lb.getLocations();
1767          
1768          //try each datanode location of the block
1769          final int timeout = 3000 * datanodes.length + socketTimeout;
1770          boolean done = false;
1771          for(int j = 0; !done && j < datanodes.length; j++) {
1772            DataOutputStream out = null;
1773            DataInputStream in = null;
1774            
1775            try {
1776              //connect to a datanode
1777              IOStreamPair pair = connectToDN(socketFactory, connectToDnViaHostname,
1778                  encryptionKey, datanodes[j], timeout);
1779              out = new DataOutputStream(new BufferedOutputStream(pair.out,
1780                  HdfsConstants.SMALL_BUFFER_SIZE));
1781              in = new DataInputStream(pair.in);
1782    
1783              if (LOG.isDebugEnabled()) {
1784                LOG.debug("write to " + datanodes[j] + ": "
1785                    + Op.BLOCK_CHECKSUM + ", block=" + block);
1786              }
1787              // get block MD5
1788              new Sender(out).blockChecksum(block, lb.getBlockToken());
1789    
1790              final BlockOpResponseProto reply =
1791                BlockOpResponseProto.parseFrom(PBHelper.vintPrefixed(in));
1792    
1793              if (reply.getStatus() != Status.SUCCESS) {
1794                if (reply.getStatus() == Status.ERROR_ACCESS_TOKEN) {
1795                  throw new InvalidBlockTokenException();
1796                } else {
1797                  throw new IOException("Bad response " + reply + " for block "
1798                      + block + " from datanode " + datanodes[j]);
1799                }
1800              }
1801              
1802              OpBlockChecksumResponseProto checksumData =
1803                reply.getChecksumResponse();
1804    
1805              //read byte-per-checksum
1806              final int bpc = checksumData.getBytesPerCrc();
1807              if (i == 0) { //first block
1808                bytesPerCRC = bpc;
1809              }
1810              else if (bpc != bytesPerCRC) {
1811                throw new IOException("Byte-per-checksum not matched: bpc=" + bpc
1812                    + " but bytesPerCRC=" + bytesPerCRC);
1813              }
1814              
1815              //read crc-per-block
1816              final long cpb = checksumData.getCrcPerBlock();
1817              if (locatedblocks.size() > 1 && i == 0) {
1818                crcPerBlock = cpb;
1819              }
1820    
1821              //read md5
1822              final MD5Hash md5 = new MD5Hash(
1823                  checksumData.getMd5().toByteArray());
1824              md5.write(md5out);
1825              
1826              // read crc-type
1827              final DataChecksum.Type ct;
1828              if (checksumData.hasCrcType()) {
1829                ct = PBHelper.convert(checksumData
1830                    .getCrcType());
1831              } else {
1832                LOG.debug("Retrieving checksum from an earlier-version DataNode: " +
1833                          "inferring checksum by reading first byte");
1834                ct = inferChecksumTypeByReading(
1835                    clientName, socketFactory, socketTimeout, lb, datanodes[j],
1836                    encryptionKey, connectToDnViaHostname);
1837              }
1838    
1839              if (i == 0) { // first block
1840                crcType = ct;
1841              } else if (crcType != DataChecksum.Type.MIXED
1842                  && crcType != ct) {
1843                // if crc types are mixed in a file
1844                crcType = DataChecksum.Type.MIXED;
1845              }
1846    
1847              done = true;
1848    
1849              if (LOG.isDebugEnabled()) {
1850                if (i == 0) {
1851                  LOG.debug("set bytesPerCRC=" + bytesPerCRC
1852                      + ", crcPerBlock=" + crcPerBlock);
1853                }
1854                LOG.debug("got reply from " + datanodes[j] + ": md5=" + md5);
1855              }
1856            } catch (InvalidBlockTokenException ibte) {
1857              if (i > lastRetriedIndex) {
1858                if (LOG.isDebugEnabled()) {
1859                  LOG.debug("Got access token error in response to OP_BLOCK_CHECKSUM "
1860                      + "for file " + src + " for block " + block
1861                      + " from datanode " + datanodes[j]
1862                      + ". Will retry the block once.");
1863                }
1864                lastRetriedIndex = i;
1865                done = true; // actually it's not done; but we'll retry
1866                i--; // repeat at i-th block
1867                refetchBlocks = true;
1868                break;
1869              }
1870            } catch (IOException ie) {
1871              LOG.warn("src=" + src + ", datanodes["+j+"]=" + datanodes[j], ie);
1872            } finally {
1873              IOUtils.closeStream(in);
1874              IOUtils.closeStream(out);
1875            }
1876          }
1877    
1878          if (!done) {
1879            throw new IOException("Fail to get block MD5 for " + block);
1880          }
1881        }
1882    
1883        //compute file MD5
1884        final MD5Hash fileMD5 = MD5Hash.digest(md5out.getData()); 
1885        switch (crcType) {
1886          case CRC32:
1887            return new MD5MD5CRC32GzipFileChecksum(bytesPerCRC,
1888                crcPerBlock, fileMD5);
1889          case CRC32C:
1890            return new MD5MD5CRC32CastagnoliFileChecksum(bytesPerCRC,
1891                crcPerBlock, fileMD5);
1892          default:
1893            // If there is no block allocated for the file,
1894            // return one with the magic entry that matches what previous
1895            // hdfs versions return.
1896            if (locatedblocks.size() == 0) {
1897              return new MD5MD5CRC32GzipFileChecksum(0, 0, fileMD5);
1898            }
1899    
1900            // we should never get here since the validity was checked
1901            // when getCrcType() was called above.
1902            return null;
1903        }
1904      }
1905    
1906      /**
1907       * Connect to the given datanode's data transfer port, and return
1908       * the resulting IOStreamPair. This includes encryption wrapping, etc.
1909       */
1910      private static IOStreamPair connectToDN(
1911          SocketFactory socketFactory, boolean connectToDnViaHostname,
1912          DataEncryptionKey encryptionKey, DatanodeInfo dn, int timeout)
1913          throws IOException
1914      {
1915        boolean success = false;
1916        Socket sock = null;
1917        try {
1918          sock = socketFactory.createSocket();
1919          String dnAddr = dn.getXferAddr(connectToDnViaHostname);
1920          if (LOG.isDebugEnabled()) {
1921            LOG.debug("Connecting to datanode " + dnAddr);
1922          }
1923          NetUtils.connect(sock, NetUtils.createSocketAddr(dnAddr), timeout);
1924          sock.setSoTimeout(timeout);
1925      
1926          OutputStream unbufOut = NetUtils.getOutputStream(sock);
1927          InputStream unbufIn = NetUtils.getInputStream(sock);
1928          IOStreamPair ret;
1929          if (encryptionKey != null) {
1930            ret = DataTransferEncryptor.getEncryptedStreams(
1931                    unbufOut, unbufIn, encryptionKey);
1932          } else {
1933            ret = new IOStreamPair(unbufIn, unbufOut);        
1934          }
1935          success = true;
1936          return ret;
1937        } finally {
1938          if (!success) {
1939            IOUtils.closeSocket(sock);
1940          }
1941        }
1942      }
1943      
1944      /**
1945       * Infer the checksum type for a replica by sending an OP_READ_BLOCK
1946       * for the first byte of that replica. This is used for compatibility
1947       * with older HDFS versions which did not include the checksum type in
1948       * OpBlockChecksumResponseProto.
1949       *
1950       * @param clientName the name of the DFSClient requesting the checksum
1951       * @param socketFactory factory used to create the socket to the datanode
1952       * @param socketTimeout timeout to use when connecting and waiting for a response
1953       * @param lb the located block
1954       * @param dn the datanode to read the replica from
1955       * @return the inferred checksum type
1956       * @throws IOException if an error occurs
1957       */
1958      private static Type inferChecksumTypeByReading(
1959          String clientName, SocketFactory socketFactory, int socketTimeout,
1960          LocatedBlock lb, DatanodeInfo dn,
1961          DataEncryptionKey encryptionKey, boolean connectToDnViaHostname)
1962          throws IOException {
1963        IOStreamPair pair = connectToDN(socketFactory, connectToDnViaHostname,
1964            encryptionKey, dn, socketTimeout);
1965    
1966        try {
1967          DataOutputStream out = new DataOutputStream(new BufferedOutputStream(pair.out,
1968              HdfsConstants.SMALL_BUFFER_SIZE));
1969          DataInputStream in = new DataInputStream(pair.in);
1970      
1971          new Sender(out).readBlock(lb.getBlock(), lb.getBlockToken(), clientName, 0, 1, true);
1972          final BlockOpResponseProto reply =
1973              BlockOpResponseProto.parseFrom(PBHelper.vintPrefixed(in));
1974          
1975          if (reply.getStatus() != Status.SUCCESS) {
1976            if (reply.getStatus() == Status.ERROR_ACCESS_TOKEN) {
1977              throw new InvalidBlockTokenException();
1978            } else {
1979              throw new IOException("Bad response " + reply + " trying to read "
1980                  + lb.getBlock() + " from datanode " + dn);
1981            }
1982          }
1983          
1984          return PBHelper.convert(reply.getReadOpChecksumInfo().getChecksum().getType());
1985        } finally {
1986          IOUtils.cleanup(null, pair.in, pair.out);
1987        }
1988      }
1989    
1990      /**
1991       * Set permissions to a file or directory.
1992       * @param src path name.
1993       * @param permission the permission to set
1994       * 
1995       * @see ClientProtocol#setPermission(String, FsPermission)
1996       */
1997      public void setPermission(String src, FsPermission permission)
1998          throws IOException {
1999        checkOpen();
2000        try {
2001          namenode.setPermission(src, permission);
2002        } catch(RemoteException re) {
2003          throw re.unwrapRemoteException(AccessControlException.class,
2004                                         FileNotFoundException.class,
2005                                         SafeModeException.class,
2006                                         UnresolvedPathException.class,
2007                                         SnapshotAccessControlException.class);
2008        }
2009      }
2010    
2011      /**
2012       * Set file or directory owner.
2013       * @param src path name.
2014       * @param username user id.
2015       * @param groupname user group.
2016       * 
2017       * @see ClientProtocol#setOwner(String, String, String)
2018       */
2019      public void setOwner(String src, String username, String groupname)
2020          throws IOException {
2021        checkOpen();
2022        try {
2023          namenode.setOwner(src, username, groupname);
2024        } catch(RemoteException re) {
2025          throw re.unwrapRemoteException(AccessControlException.class,
2026                                         FileNotFoundException.class,
2027                                         SafeModeException.class,
2028                                         UnresolvedPathException.class,
2029                                         SnapshotAccessControlException.class);                                   
2030        }
2031      }
2032    
2033      /**
2034       * @see ClientProtocol#getStats()
2035       */
2036      public FsStatus getDiskStatus() throws IOException {
2037        long rawNums[] = namenode.getStats();
2038        return new FsStatus(rawNums[0], rawNums[1], rawNums[2]);
2039      }
2040    
2041      /**
2042       * Returns count of blocks with no good replicas left. Normally should be 
2043       * zero.
2044       * @throws IOException
2045       */ 
2046      public long getMissingBlocksCount() throws IOException {
2047        return namenode.getStats()[ClientProtocol.GET_STATS_MISSING_BLOCKS_IDX];
2048      }
2049      
2050      /**
2051       * Returns count of blocks with one or more replicas missing.
2052       * @throws IOException
2053       */ 
2054      public long getUnderReplicatedBlocksCount() throws IOException {
2055        return namenode.getStats()[ClientProtocol.GET_STATS_UNDER_REPLICATED_IDX];
2056      }
2057      
2058      /**
2059       * Returns count of blocks with at least one replica marked corrupt. 
2060       * @throws IOException
2061       */ 
2062      public long getCorruptBlocksCount() throws IOException {
2063        return namenode.getStats()[ClientProtocol.GET_STATS_CORRUPT_BLOCKS_IDX];
2064      }
2065      
2066      /**
2067       * @return a list in which each entry describes a corrupt file/block
2068       * @throws IOException
2069       */
2070      public CorruptFileBlocks listCorruptFileBlocks(String path,
2071                                                     String cookie)
2072        throws IOException {
2073        return namenode.listCorruptFileBlocks(path, cookie);
2074      }
2075    
2076      public DatanodeInfo[] datanodeReport(DatanodeReportType type)
2077      throws IOException {
2078        return namenode.getDatanodeReport(type);
2079      }
2080        
2081      /**
2082       * Enter, leave or get safe mode.
2083       * 
2084       * @see ClientProtocol#setSafeMode(HdfsConstants.SafeModeAction,boolean)
2085       */
2086      public boolean setSafeMode(SafeModeAction action) throws IOException {
2087        return setSafeMode(action, false);
2088      }
2089      
2090      /**
2091       * Enter, leave or get safe mode.
2092       * 
2093       * @param action
2094       *          One of SafeModeAction.GET, SafeModeAction.ENTER or
2095       *          SafeModeAction.LEAVE
2096       * @param isChecked
2097       *          If true, then check only active namenode's safemode status, else
2098       *          check first namenode's status.
2099       * @see ClientProtocol#setSafeMode(HdfsConstants.SafeModeAction, boolean)
2100       */
2101      public boolean setSafeMode(SafeModeAction action, boolean isChecked) throws IOException{
2102        return namenode.setSafeMode(action, isChecked);    
2103      }
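  /*
   * Usage sketch: checking and toggling safe mode with the methods above. The
   * enum constant names (SAFEMODE_GET, SAFEMODE_ENTER, SAFEMODE_LEAVE) are
   * assumed here for illustration.
   *
   *   boolean inSafeMode = client.setSafeMode(SafeModeAction.SAFEMODE_GET);
   *   if (!inSafeMode) {
   *     client.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
   *     // ... administrative work while writes are blocked ...
   *     client.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
   *   }
   */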
2104     
2105      /**
2106       * Create one snapshot.
2107       * 
2108       * @param snapshotRoot The directory where the snapshot is to be taken
2109       * @param snapshotName Name of the snapshot
2110       * @return the snapshot path.
2111       * @see ClientProtocol#createSnapshot(String, String)
2112       */
2113      public String createSnapshot(String snapshotRoot, String snapshotName)
2114          throws IOException {
2115        checkOpen();
2116        try {
2117          return namenode.createSnapshot(snapshotRoot, snapshotName);
2118        } catch(RemoteException re) {
2119          throw re.unwrapRemoteException();
2120        }
2121      }
2122      
2123      /**
2124       * Delete a snapshot of a snapshottable directory.
2125       * 
2126       * @param snapshotRoot The snapshottable directory that the 
2127       *                    to-be-deleted snapshot belongs to
2128       * @param snapshotName The name of the to-be-deleted snapshot
2129       * @throws IOException
2130       * @see ClientProtocol#deleteSnapshot(String, String)
2131       */
2132      public void deleteSnapshot(String snapshotRoot, String snapshotName)
2133          throws IOException {
2134        try {
2135          namenode.deleteSnapshot(snapshotRoot, snapshotName);
2136        } catch(RemoteException re) {
2137          throw re.unwrapRemoteException();
2138        }
2139      }
2140      
2141      /**
2142       * Rename a snapshot.
2143       * @param snapshotDir The directory path where the snapshot was taken
2144       * @param snapshotOldName Old name of the snapshot
2145       * @param snapshotNewName New name of the snapshot
2146       * @throws IOException
2147       * @see ClientProtocol#renameSnapshot(String, String, String)
2148       */
2149      public void renameSnapshot(String snapshotDir, String snapshotOldName,
2150          String snapshotNewName) throws IOException {
2151        checkOpen();
2152        try {
2153          namenode.renameSnapshot(snapshotDir, snapshotOldName, snapshotNewName);
2154        } catch(RemoteException re) {
2155          throw re.unwrapRemoteException();
2156        }
2157      }
2158      
2159      /**
2160       * Get all the current snapshottable directories.
2161       * @return All the current snapshottable directories
2162       * @throws IOException
2163       * @see ClientProtocol#getSnapshottableDirListing()
2164       */
2165      public SnapshottableDirectoryStatus[] getSnapshottableDirListing()
2166          throws IOException {
2167        checkOpen();
2168        try {
2169          return namenode.getSnapshottableDirListing();
2170        } catch(RemoteException re) {
2171          throw re.unwrapRemoteException();
2172        }
2173      }
2174    
2175      /**
2176       * Allow snapshot on a directory.
2177       * 
2178       * @see ClientProtocol#allowSnapshot(String snapshotRoot)
2179       */
2180      public void allowSnapshot(String snapshotRoot) throws IOException {
2181        checkOpen();
2182        try {
2183          namenode.allowSnapshot(snapshotRoot);
2184        } catch (RemoteException re) {
2185          throw re.unwrapRemoteException();
2186        }
2187      }
2188      
2189      /**
2190       * Disallow snapshot on a directory.
2191       * 
2192       * @see ClientProtocol#disallowSnapshot(String snapshotRoot)
2193       */
2194      public void disallowSnapshot(String snapshotRoot) throws IOException {
2195        checkOpen();
2196        try {
2197          namenode.disallowSnapshot(snapshotRoot);
2198        } catch (RemoteException re) {
2199          throw re.unwrapRemoteException();
2200        }
2201      }
2202      
2203      /**
2204       * Get the difference between two snapshots, or between a snapshot and the
2205       * current tree of a directory.
2206       * @see ClientProtocol#getSnapshotDiffReport(String, String, String)
2207       */
2208      public SnapshotDiffReport getSnapshotDiffReport(String snapshotDir,
2209          String fromSnapshot, String toSnapshot) throws IOException {
2210        checkOpen();
2211        try {
2212          return namenode.getSnapshotDiffReport(snapshotDir,
2213              fromSnapshot, toSnapshot);
2214        } catch(RemoteException re) {
2215          throw re.unwrapRemoteException();
2216        }
2217      }
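  /*
   * Usage sketch: a typical snapshot workflow with the methods in this class,
   * from enabling snapshots on a directory to diffing two snapshots and
   * cleaning up. The directory path and snapshot names are assumed purely for
   * illustration.
   *
   *   client.allowSnapshot("/data/warehouse");
   *   client.createSnapshot("/data/warehouse", "before-load");
   *   // ... load new data ...
   *   client.createSnapshot("/data/warehouse", "after-load");
   *   SnapshotDiffReport diff = client.getSnapshotDiffReport(
   *       "/data/warehouse", "before-load", "after-load");
   *   client.deleteSnapshot("/data/warehouse", "before-load");
   */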
2218      
2219      /**
2220       * Save namespace image.
2221       * 
2222       * @see ClientProtocol#saveNamespace()
2223       */
2224      void saveNamespace() throws AccessControlException, IOException {
2225        try {
2226          namenode.saveNamespace();
2227        } catch(RemoteException re) {
2228          throw re.unwrapRemoteException(AccessControlException.class);
2229        }
2230      }
2231    
2232      /**
2233       * Rolls the edit log on the active NameNode.
2234       * @return the txid of the new log segment 
2235       *
2236       * @see ClientProtocol#rollEdits()
2237       */
2238      long rollEdits() throws AccessControlException, IOException {
2239        try {
2240          return namenode.rollEdits();
2241        } catch(RemoteException re) {
2242          throw re.unwrapRemoteException(AccessControlException.class);
2243        }
2244      }
2245      
2246      /**
2247       * enable/disable restore failed storage.
2248       * 
2249       * @see ClientProtocol#restoreFailedStorage(String arg)
2250       */
2251      boolean restoreFailedStorage(String arg)
2252          throws AccessControlException, IOException{
2253        return namenode.restoreFailedStorage(arg);
2254      }
2255    
2256      /**
2257       * Refresh the hosts and exclude files.  (Rereads them.)
2258       * See {@link ClientProtocol#refreshNodes()} 
2259       * for more details.
2260       * 
2261       * @see ClientProtocol#refreshNodes()
2262       */
2263      public void refreshNodes() throws IOException {
2264        namenode.refreshNodes();
2265      }
2266    
2267      /**
2268       * Dumps DFS data structures into specified file.
2269       * 
2270       * @see ClientProtocol#metaSave(String)
2271       */
2272      public void metaSave(String pathname) throws IOException {
2273        namenode.metaSave(pathname);
2274      }
2275    
2276      /**
2277       * Requests the namenode to tell all datanodes to use a new, non-persistent
2278       * bandwidth value for dfs.balance.bandwidthPerSec.
2279       * See {@link ClientProtocol#setBalancerBandwidth(long)} 
2280       * for more details.
2281       * 
2282       * @see ClientProtocol#setBalancerBandwidth(long)
2283       */
2284      public void setBalancerBandwidth(long bandwidth) throws IOException {
2285        namenode.setBalancerBandwidth(bandwidth);
2286      }
2287        
2288      /**
2289       * @see ClientProtocol#finalizeUpgrade()
2290       */
2291      public void finalizeUpgrade() throws IOException {
2292        namenode.finalizeUpgrade();
2293      }
2294    
2295      /** Create a directory, and any missing parent directories, with default
2296       * permissions. */
2297      @Deprecated
2298      public boolean mkdirs(String src) throws IOException {
2299        return mkdirs(src, null, true);
2300      }
2301    
2302      /**
2303       * Create a directory (or hierarchy of directories) with the given
2304       * name and permission.
2305       *
2306       * @param src The path of the directory being created
2307       * @param permission The permission of the directory being created.
2308       * If permission == null, use {@link FsPermission#getDefault()}.
2309       * @param createParent create missing parent directory if true
2310       * 
2311       * @return True if the operation succeeds.
2312       * 
2313       * @see ClientProtocol#mkdirs(String, FsPermission, boolean)
2314       */
2315      public boolean mkdirs(String src, FsPermission permission,
2316          boolean createParent) throws IOException {
2317        if (permission == null) {
2318          permission = FsPermission.getDefault();
2319        }
2320        FsPermission masked = permission.applyUMask(dfsClientConf.uMask);
2321        return primitiveMkdir(src, masked, createParent);
2322      }
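  /*
   * Usage sketch: creating a directory hierarchy with an explicit permission;
   * as shown above, the permission is masked with the client's umask before
   * the request is sent to the namenode. The path and mode are assumed purely
   * for illustration.
   *
   *   boolean created = client.mkdirs("/user/alice/output",
   *       new FsPermission((short) 0755), true);  // also create missing parents
   */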
2323    
2324      /**
2325       * Same as {@link #mkdirs(String, FsPermission, boolean)} except
2326       * that the permission has already been masked against the umask.
2327       */
2328      public boolean primitiveMkdir(String src, FsPermission absPermission)
2329        throws IOException {
2330        return primitiveMkdir(src, absPermission, true);
2331      }
2332    
2333      /**
2334       * Same as {@link #mkdirs(String, FsPermission, boolean)} except
2335       * that the permission has already been masked against the umask.
2336       */
2337      public boolean primitiveMkdir(String src, FsPermission absPermission, 
2338        boolean createParent)
2339        throws IOException {
2340        checkOpen();
2341        if (absPermission == null) {
2342          absPermission = 
2343            FsPermission.getDefault().applyUMask(dfsClientConf.uMask);
2344        } 
2345    
2346        if(LOG.isDebugEnabled()) {
2347          LOG.debug(src + ": masked=" + absPermission);
2348        }
2349        try {
2350          return namenode.mkdirs(src, absPermission, createParent);
2351        } catch(RemoteException re) {
2352          throw re.unwrapRemoteException(AccessControlException.class,
2353                                         InvalidPathException.class,
2354                                         FileAlreadyExistsException.class,
2355                                         FileNotFoundException.class,
2356                                         ParentNotDirectoryException.class,
2357                                         SafeModeException.class,
2358                                         NSQuotaExceededException.class,
2359                                         DSQuotaExceededException.class,
2360                                         UnresolvedPathException.class,
2361                                         SnapshotAccessControlException.class);
2362        }
2363      }
2364      
2365      /**
2366       * Get {@link ContentSummary} rooted at the specified directory.
2367       * @param src The string representation of the path
2368       * 
2369       * @see ClientProtocol#getContentSummary(String)
2370       */
2371      ContentSummary getContentSummary(String src) throws IOException {
2372        try {
2373          return namenode.getContentSummary(src);
2374        } catch(RemoteException re) {
2375          throw re.unwrapRemoteException(AccessControlException.class,
2376                                         FileNotFoundException.class,
2377                                         UnresolvedPathException.class);
2378        }
2379      }
2380    
2381      /**
2382       * Sets or resets quotas for a directory.
2383       * @see ClientProtocol#setQuota(String, long, long)
2384       */
2385      void setQuota(String src, long namespaceQuota, long diskspaceQuota) 
2386          throws IOException {
2387        // sanity check
2388        if ((namespaceQuota <= 0 && namespaceQuota != HdfsConstants.QUOTA_DONT_SET &&
2389             namespaceQuota != HdfsConstants.QUOTA_RESET) ||
2390            (diskspaceQuota <= 0 && diskspaceQuota != HdfsConstants.QUOTA_DONT_SET &&
2391             diskspaceQuota != HdfsConstants.QUOTA_RESET)) {
2392          throw new IllegalArgumentException("Invalid values for quota : " +
2393                                             namespaceQuota + " and " + 
2394                                             diskspaceQuota);
2395                                             
2396        }
2397        try {
2398          namenode.setQuota(src, namespaceQuota, diskspaceQuota);
2399        } catch(RemoteException re) {
2400          throw re.unwrapRemoteException(AccessControlException.class,
2401                                         FileNotFoundException.class,
2402                                         NSQuotaExceededException.class,
2403                                         DSQuotaExceededException.class,
2404                                         UnresolvedPathException.class,
2405                                         SnapshotAccessControlException.class);
2406        }
2407      }
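  /*
   * Usage sketch: setting and clearing quotas with the package-private method
   * above (callers outside this package typically go through the wrapping file
   * system). The path and limits are assumed purely for illustration.
   *
   *   // cap the directory at 100,000 namespace entries, leave the space quota alone
   *   client.setQuota("/projects/alpha", 100000, HdfsConstants.QUOTA_DONT_SET);
   *   // clear both quotas
   *   client.setQuota("/projects/alpha", HdfsConstants.QUOTA_RESET, HdfsConstants.QUOTA_RESET);
   */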
2408    
2409      /**
2410       * Set the modification and access times of a file.
2411       * 
2412       * @see ClientProtocol#setTimes(String, long, long)
2413       */
2414      public void setTimes(String src, long mtime, long atime) throws IOException {
2415        checkOpen();
2416        try {
2417          namenode.setTimes(src, mtime, atime);
2418        } catch(RemoteException re) {
2419          throw re.unwrapRemoteException(AccessControlException.class,
2420                                         FileNotFoundException.class,
2421                                         UnresolvedPathException.class,
2422                                         SnapshotAccessControlException.class);
2423        }
2424      }
2425    
2426      /**
2427       * @deprecated use {@link HdfsDataInputStream} instead.
2428       */
2429      @Deprecated
2430      public static class DFSDataInputStream extends HdfsDataInputStream {
2431    
2432        public DFSDataInputStream(DFSInputStream in) throws IOException {
2433          super(in);
2434        }
2435      }
2436    
2437      void reportChecksumFailure(String file, ExtendedBlock blk, DatanodeInfo dn) {
2438        DatanodeInfo [] dnArr = { dn };
2439        LocatedBlock [] lblocks = { new LocatedBlock(blk, dnArr) };
2440        reportChecksumFailure(file, lblocks);
2441      }
2442        
2443      // just reports checksum failure and ignores any exception during the report.
2444      void reportChecksumFailure(String file, LocatedBlock lblocks[]) {
2445        try {
2446          reportBadBlocks(lblocks);
2447        } catch (IOException ie) {
2448          LOG.info("Found corruption while reading " + file
2449              + ". Error repairing corrupt blocks. Bad blocks remain.", ie);
2450        }
2451      }
2452    
2453      @Override
2454      public String toString() {
2455        return getClass().getSimpleName() + "[clientName=" + clientName
2456            + ", ugi=" + ugi + "]"; 
2457      }
2458    
2459      public DomainSocketFactory getDomainSocketFactory() {
2460        return domainSocketFactory;
2461      }
2462    
2463      public void disableLegacyBlockReaderLocal() {
2464        shouldUseLegacyBlockReaderLocal = false;
2465      }
2466    
2467      public boolean useLegacyBlockReaderLocal() {
2468        return shouldUseLegacyBlockReaderLocal;
2469      }
2470    }