001    /**
002     * Licensed to the Apache Software Foundation (ASF) under one
003     * or more contributor license agreements.  See the NOTICE file
004     * distributed with this work for additional information
005     * regarding copyright ownership.  The ASF licenses this file
006     * to you under the Apache License, Version 2.0 (the
007     * "License"); you may not use this file except in compliance
008     * with the License.  You may obtain a copy of the License at
009     *
010     *     http://www.apache.org/licenses/LICENSE-2.0
011     *
012     * Unless required by applicable law or agreed to in writing, software
013     * distributed under the License is distributed on an "AS IS" BASIS,
014     * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
015     * See the License for the specific language governing permissions and
016     * limitations under the License.
017     */
018    package org.apache.hadoop.hdfs;
019    
020    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT;
021    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SIZE_KEY;
022    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_DEFAULT;
023    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY;
024    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_BLOCK_WRITE_LOCATEFOLLOWINGBLOCK_RETRIES_DEFAULT;
025    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_BLOCK_WRITE_LOCATEFOLLOWINGBLOCK_RETRIES_KEY;
026    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_BLOCK_WRITE_RETRIES_DEFAULT;
027    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_BLOCK_WRITE_RETRIES_KEY;
028    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_CACHED_CONN_RETRY_DEFAULT;
029    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_CACHED_CONN_RETRY_KEY;
030    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_FAILOVER_MAX_ATTEMPTS_DEFAULT;
031    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_FAILOVER_MAX_ATTEMPTS_KEY;
032    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_FAILOVER_SLEEPTIME_BASE_DEFAULT;
033    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_FAILOVER_SLEEPTIME_BASE_KEY;
034    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_FAILOVER_SLEEPTIME_MAX_DEFAULT;
035    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_FAILOVER_SLEEPTIME_MAX_KEY;
036    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_DEFAULT;
037    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_KEY;
038    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_READ_PREFETCH_SIZE_KEY;
039    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE;
040    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_CACHE_CAPACITY_DEFAULT;
041    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_CACHE_CAPACITY_KEY;
042    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC_DEFAULT;
043    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC_KEY;
044    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY;
045    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_USE_DN_HOSTNAME;
046    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_USE_DN_HOSTNAME_DEFAULT;
047    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_WRITE_EXCLUDE_NODES_CACHE_EXPIRY_INTERVAL;
048    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_WRITE_EXCLUDE_NODES_CACHE_EXPIRY_INTERVAL_DEFAULT;
049    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT;
050    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY;
051    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY;
052    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_DEFAULT;
053    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_KEY;
054    
055    import java.io.BufferedOutputStream;
056    import java.io.DataInputStream;
057    import java.io.DataOutputStream;
058    import java.io.FileNotFoundException;
059    import java.io.IOException;
060    import java.io.InputStream;
061    import java.io.OutputStream;
062    import java.net.InetAddress;
063    import java.net.InetSocketAddress;
064    import java.net.NetworkInterface;
065    import java.net.Socket;
066    import java.net.SocketException;
067    import java.net.SocketAddress;
068    import java.net.URI;
069    import java.net.UnknownHostException;
070    import java.util.ArrayList;
071    import java.util.Collections;
072    import java.util.EnumSet;
073    import java.util.HashMap;
074    import java.util.LinkedHashMap;
075    import java.util.List;
076    import java.util.Map;
077    import java.util.Random;
078    
079    import javax.net.SocketFactory;
080    
081    import org.apache.commons.logging.Log;
082    import org.apache.commons.logging.LogFactory;
083    import org.apache.hadoop.classification.InterfaceAudience;
084    import org.apache.hadoop.conf.Configuration;
085    import org.apache.hadoop.fs.BlockLocation;
086    import org.apache.hadoop.fs.BlockStorageLocation;
087    import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
088    import org.apache.hadoop.fs.ContentSummary;
089    import org.apache.hadoop.fs.CreateFlag;
090    import org.apache.hadoop.fs.FileAlreadyExistsException;
091    import org.apache.hadoop.fs.FileSystem;
092    import org.apache.hadoop.fs.FsServerDefaults;
093    import org.apache.hadoop.fs.FsStatus;
094    import org.apache.hadoop.fs.HdfsBlockLocation;
095    import org.apache.hadoop.fs.InvalidPathException;
096    import org.apache.hadoop.fs.MD5MD5CRC32CastagnoliFileChecksum;
097    import org.apache.hadoop.fs.MD5MD5CRC32FileChecksum;
098    import org.apache.hadoop.fs.MD5MD5CRC32GzipFileChecksum;
099    import org.apache.hadoop.fs.Options;
100    import org.apache.hadoop.fs.Options.ChecksumOpt;
101    import org.apache.hadoop.fs.ParentNotDirectoryException;
102    import org.apache.hadoop.fs.Path;
103    import org.apache.hadoop.fs.UnresolvedLinkException;
104    import org.apache.hadoop.fs.VolumeId;
105    import org.apache.hadoop.fs.permission.FsPermission;
106    import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
107    import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
108    import org.apache.hadoop.hdfs.protocol.ClientProtocol;
109    import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
110    import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
111    import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
112    import org.apache.hadoop.hdfs.protocol.DirectoryListing;
113    import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
114    import org.apache.hadoop.hdfs.protocol.HdfsBlocksMetadata;
115    import org.apache.hadoop.hdfs.protocol.HdfsConstants;
116    import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
117    import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
118    import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
119    import org.apache.hadoop.hdfs.protocol.LocatedBlock;
120    import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
121    import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException;
122    import org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException;
123    import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
124    import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
125    import org.apache.hadoop.hdfs.protocol.UnresolvedPathException;
126    import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferEncryptor;
127    import org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair;
128    import org.apache.hadoop.hdfs.protocol.datatransfer.Op;
129    import org.apache.hadoop.hdfs.protocol.datatransfer.ReplaceDatanodeOnFailure;
130    import org.apache.hadoop.hdfs.protocol.datatransfer.Sender;
131    import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto;
132    import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto;
133    import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
134    import org.apache.hadoop.hdfs.protocolPB.PBHelper;
135    import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
136    import org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException;
137    import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
138    import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
139    import org.apache.hadoop.hdfs.server.namenode.NameNode;
140    import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
141    import org.apache.hadoop.io.DataOutputBuffer;
142    import org.apache.hadoop.io.EnumSetWritable;
143    import org.apache.hadoop.io.IOUtils;
144    import org.apache.hadoop.io.MD5Hash;
145    import org.apache.hadoop.io.Text;
146    import org.apache.hadoop.io.retry.LossyRetryInvocationHandler;
147    import org.apache.hadoop.ipc.Client;
148    import org.apache.hadoop.ipc.RPC;
149    import org.apache.hadoop.ipc.RemoteException;
150    import org.apache.hadoop.net.DNS;
151    import org.apache.hadoop.net.NetUtils;
152    import org.apache.hadoop.security.AccessControlException;
153    import org.apache.hadoop.security.UserGroupInformation;
154    import org.apache.hadoop.security.token.SecretManager.InvalidToken;
155    import org.apache.hadoop.security.token.Token;
156    import org.apache.hadoop.security.token.TokenRenewer;
157    import org.apache.hadoop.util.DataChecksum;
158    import org.apache.hadoop.util.DataChecksum.Type;
159    import org.apache.hadoop.util.Progressable;
160    import org.apache.hadoop.util.Time;
161    
162    import com.google.common.annotations.VisibleForTesting;
163    import com.google.common.base.Joiner;
164    import com.google.common.base.Preconditions;
165    import com.google.common.net.InetAddresses;
166    
167    /********************************************************
168     * DFSClient can connect to a Hadoop Filesystem and 
169     * perform basic file tasks.  It uses the ClientProtocol
170     * to communicate with a NameNode daemon, and connects 
171     * directly to DataNodes to read/write block data.
172     *
173     * Hadoop DFS users should obtain an instance of 
174     * DistributedFileSystem, which uses DFSClient to handle
175     * filesystem tasks.
176     *
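        * A minimal usage sketch of the intended path through
        * DistributedFileSystem (the URI and path below are hypothetical
        * examples, not defaults):
        *
        * <pre>{@code
        * Configuration conf = new HdfsConfiguration();
        * FileSystem fs = FileSystem.get(URI.create("hdfs://namenode:8020/"), conf);
        * FSDataOutputStream out = fs.create(new Path("/tmp/example.txt"));
        * out.writeBytes("hello");
        * out.close();
        * }</pre>
        *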
177     ********************************************************/
178    @InterfaceAudience.Private
179    public class DFSClient implements java.io.Closeable {
180      public static final Log LOG = LogFactory.getLog(DFSClient.class);
181      public static final long SERVER_DEFAULTS_VALIDITY_PERIOD = 60 * 60 * 1000L; // 1 hour
182      static final int TCP_WINDOW_SIZE = 128 * 1024; // 128 KB
183    
184      private final Configuration conf;
185      private final Conf dfsClientConf;
186      final ClientProtocol namenode;
187      /* The service used for delegation tokens */
188      private Text dtService;
189    
190      final UserGroupInformation ugi;
191      volatile boolean clientRunning = true;
192      volatile long lastLeaseRenewal;
193      private volatile FsServerDefaults serverDefaults;
194      private volatile long serverDefaultsLastUpdate;
195      final String clientName;
196      SocketFactory socketFactory;
197      final ReplaceDatanodeOnFailure dtpReplaceDatanodeOnFailure;
198      final FileSystem.Statistics stats;
199      private final String authority;
200      final PeerCache peerCache;
201      private Random r = new Random();
202      private SocketAddress[] localInterfaceAddrs;
203      private DataEncryptionKey encryptionKey;
204      private boolean shouldUseLegacyBlockReaderLocal;
205      
206      /**
207       * DFSClient configuration 
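          *
          * <p>A hedged sketch of how a caller might override a few of the keys read
          * below before building a Conf; the key constants are real, but the values
          * are arbitrary examples:
          * <pre>{@code
          * Configuration conf = new HdfsConfiguration();
          * conf.setInt(DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, 120 * 1000);
          * conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 256L * 1024 * 1024);
          * DFSClient.Conf clientConf = new DFSClient.Conf(conf);
          * }</pre>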
208       */
209      public static class Conf {
210        final int hdfsTimeout;    // timeout value for a DFS operation.
211        final int maxFailoverAttempts;
212        final int failoverSleepBaseMillis;
213        final int failoverSleepMaxMillis;
214        final int maxBlockAcquireFailures;
215        final int confTime;
216        final int ioBufferSize;
217        final ChecksumOpt defaultChecksumOpt;
218        final int writePacketSize;
219        final int socketTimeout;
220        final int socketCacheCapacity;
221        final long socketCacheExpiry;
222        final long excludedNodesCacheExpiry;
223        /** Wait time window (in msec) if BlockMissingException is caught */
224        final int timeWindow;
225        final int nCachedConnRetry;
226        final int nBlockWriteRetry;
227        final int nBlockWriteLocateFollowingRetry;
228        final long defaultBlockSize;
229        final long prefetchSize;
230        final short defaultReplication;
231        final String taskId;
232        final FsPermission uMask;
233        final boolean connectToDnViaHostname;
234        final boolean getHdfsBlocksMetadataEnabled;
235        final int getFileBlockStorageLocationsNumThreads;
236        final int getFileBlockStorageLocationsTimeout;
237    
238        final boolean useLegacyBlockReader;
239        final boolean useLegacyBlockReaderLocal;
240        final String domainSocketPath;
241        final boolean skipShortCircuitChecksums;
242        final int shortCircuitBufferSize;
243        final boolean shortCircuitLocalReads;
244        final boolean domainSocketDataTraffic;
245        final int shortCircuitStreamsCacheSize;
246        final long shortCircuitStreamsCacheExpiryMs; 
247    
248        public Conf(Configuration conf) {
249          // The hdfsTimeout is currently the same as the ipc timeout 
250          hdfsTimeout = Client.getTimeout(conf);
251    
252          maxFailoverAttempts = conf.getInt(
253              DFS_CLIENT_FAILOVER_MAX_ATTEMPTS_KEY,
254              DFS_CLIENT_FAILOVER_MAX_ATTEMPTS_DEFAULT);
255          failoverSleepBaseMillis = conf.getInt(
256              DFS_CLIENT_FAILOVER_SLEEPTIME_BASE_KEY,
257              DFS_CLIENT_FAILOVER_SLEEPTIME_BASE_DEFAULT);
258          failoverSleepMaxMillis = conf.getInt(
259              DFS_CLIENT_FAILOVER_SLEEPTIME_MAX_KEY,
260              DFS_CLIENT_FAILOVER_SLEEPTIME_MAX_DEFAULT);
261    
262          maxBlockAcquireFailures = conf.getInt(
263              DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_KEY,
264              DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_DEFAULT);
265          confTime = conf.getInt(DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY,
266              HdfsServerConstants.WRITE_TIMEOUT);
267          ioBufferSize = conf.getInt(
268              CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY,
269              CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT);
270          defaultChecksumOpt = getChecksumOptFromConf(conf);
271          socketTimeout = conf.getInt(DFS_CLIENT_SOCKET_TIMEOUT_KEY,
272              HdfsServerConstants.READ_TIMEOUT);
273          // dfs.write.packet.size is an internal config variable
274          writePacketSize = conf.getInt(DFS_CLIENT_WRITE_PACKET_SIZE_KEY,
275              DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT);
276          defaultBlockSize = conf.getLongBytes(DFS_BLOCK_SIZE_KEY,
277              DFS_BLOCK_SIZE_DEFAULT);
278          defaultReplication = (short) conf.getInt(
279              DFS_REPLICATION_KEY, DFS_REPLICATION_DEFAULT);
280          taskId = conf.get("mapreduce.task.attempt.id", "NONMAPREDUCE");
281          socketCacheCapacity = conf.getInt(DFS_CLIENT_SOCKET_CACHE_CAPACITY_KEY,
282              DFS_CLIENT_SOCKET_CACHE_CAPACITY_DEFAULT);
283          socketCacheExpiry = conf.getLong(DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC_KEY,
284              DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC_DEFAULT);
285          excludedNodesCacheExpiry = conf.getLong(
286              DFS_CLIENT_WRITE_EXCLUDE_NODES_CACHE_EXPIRY_INTERVAL,
287              DFS_CLIENT_WRITE_EXCLUDE_NODES_CACHE_EXPIRY_INTERVAL_DEFAULT);
288          prefetchSize = conf.getLong(DFS_CLIENT_READ_PREFETCH_SIZE_KEY,
289              10 * defaultBlockSize);
290          timeWindow = conf.getInt(DFS_CLIENT_RETRY_WINDOW_BASE, 3000);
291          nCachedConnRetry = conf.getInt(DFS_CLIENT_CACHED_CONN_RETRY_KEY,
292              DFS_CLIENT_CACHED_CONN_RETRY_DEFAULT);
293          nBlockWriteRetry = conf.getInt(DFS_CLIENT_BLOCK_WRITE_RETRIES_KEY,
294              DFS_CLIENT_BLOCK_WRITE_RETRIES_DEFAULT);
295          nBlockWriteLocateFollowingRetry = conf.getInt(
296              DFS_CLIENT_BLOCK_WRITE_LOCATEFOLLOWINGBLOCK_RETRIES_KEY,
297              DFS_CLIENT_BLOCK_WRITE_LOCATEFOLLOWINGBLOCK_RETRIES_DEFAULT);
298          uMask = FsPermission.getUMask(conf);
299          connectToDnViaHostname = conf.getBoolean(DFS_CLIENT_USE_DN_HOSTNAME,
300              DFS_CLIENT_USE_DN_HOSTNAME_DEFAULT);
301          getHdfsBlocksMetadataEnabled = conf.getBoolean(
302              DFSConfigKeys.DFS_HDFS_BLOCKS_METADATA_ENABLED, 
303              DFSConfigKeys.DFS_HDFS_BLOCKS_METADATA_ENABLED_DEFAULT);
304          getFileBlockStorageLocationsNumThreads = conf.getInt(
305              DFSConfigKeys.DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_NUM_THREADS,
306              DFSConfigKeys.DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_NUM_THREADS_DEFAULT);
307          getFileBlockStorageLocationsTimeout = conf.getInt(
308              DFSConfigKeys.DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_TIMEOUT,
309              DFSConfigKeys.DFS_CLIENT_FILE_BLOCK_STORAGE_LOCATIONS_TIMEOUT_DEFAULT);
310    
311          useLegacyBlockReader = conf.getBoolean(
312              DFSConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADER,
313              DFSConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADER_DEFAULT);
314          useLegacyBlockReaderLocal = conf.getBoolean(
315              DFSConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL,
316              DFSConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL_DEFAULT);
317          shortCircuitLocalReads = conf.getBoolean(
318              DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_KEY,
319              DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_DEFAULT);
320          domainSocketDataTraffic = conf.getBoolean(
321              DFSConfigKeys.DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC,
322              DFSConfigKeys.DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC_DEFAULT);
323          domainSocketPath = conf.getTrimmed(
324              DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY,
325              DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_DEFAULT);
326    
327          if (BlockReaderLocal.LOG.isDebugEnabled()) {
328            BlockReaderLocal.LOG.debug(
329                DFSConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL
330                + " = " + useLegacyBlockReaderLocal);
331            BlockReaderLocal.LOG.debug(
332                DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_KEY
333                + " = " + shortCircuitLocalReads);
334            BlockReaderLocal.LOG.debug(
335                DFSConfigKeys.DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC
336                + " = " + domainSocketDataTraffic);
337            BlockReaderLocal.LOG.debug(
338                DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY
339                + " = " + domainSocketPath);
340          }
341    
342          skipShortCircuitChecksums = conf.getBoolean(
343              DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_SKIP_CHECKSUM_KEY,
344              DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_SKIP_CHECKSUM_DEFAULT);
345          shortCircuitBufferSize = conf.getInt(
346              DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_BUFFER_SIZE_KEY,
347              DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_BUFFER_SIZE_DEFAULT);
348          shortCircuitStreamsCacheSize = conf.getInt(
349              DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_STREAMS_CACHE_SIZE_KEY,
350              DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_STREAMS_CACHE_SIZE_DEFAULT);
351          shortCircuitStreamsCacheExpiryMs = conf.getLong(
352              DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_STREAMS_CACHE_EXPIRY_MS_KEY,
353              DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_STREAMS_CACHE_EXPIRY_MS_DEFAULT);
354        }
355    
356        private DataChecksum.Type getChecksumType(Configuration conf) {
357          final String checksum = conf.get(
358              DFSConfigKeys.DFS_CHECKSUM_TYPE_KEY,
359              DFSConfigKeys.DFS_CHECKSUM_TYPE_DEFAULT);
360          try {
361            return DataChecksum.Type.valueOf(checksum);
362          } catch(IllegalArgumentException iae) {
363            LOG.warn("Bad checksum type: " + checksum + ". Using default "
364                + DFSConfigKeys.DFS_CHECKSUM_TYPE_DEFAULT);
365            return DataChecksum.Type.valueOf(
366                DFSConfigKeys.DFS_CHECKSUM_TYPE_DEFAULT); 
367          }
368        }
369    
370        // Construct a checksum option from conf
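           // For example, with dfs.checksum.type=CRC32C and dfs.bytes-per-checksum=512
           // (the usual defaults), this yields a ChecksumOpt of (CRC32C, 512).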
371        private ChecksumOpt getChecksumOptFromConf(Configuration conf) {
372          DataChecksum.Type type = getChecksumType(conf);
373          int bytesPerChecksum = conf.getInt(DFS_BYTES_PER_CHECKSUM_KEY,
374              DFS_BYTES_PER_CHECKSUM_DEFAULT);
375          return new ChecksumOpt(type, bytesPerChecksum);
376        }
377    
378        // create a DataChecksum with the default option.
379        private DataChecksum createChecksum() throws IOException {
380          return createChecksum(null);
381        }
382    
383        private DataChecksum createChecksum(ChecksumOpt userOpt) 
384            throws IOException {
385          // Fill in any missing field with the default.
386          ChecksumOpt myOpt = ChecksumOpt.processChecksumOpt(
387              defaultChecksumOpt, userOpt);
388          DataChecksum dataChecksum = DataChecksum.newDataChecksum(
389              myOpt.getChecksumType(),
390              myOpt.getBytesPerChecksum());
391          if (dataChecksum == null) {
392            throw new IOException("Invalid checksum type specified: "
393                + myOpt.getChecksumType().name());
394          }
395          return dataChecksum;
396        }
397      }
398     
399      public Conf getConf() {
400        return dfsClientConf;
401      }
402      
403      Configuration getConfiguration() {
404        return conf;
405      }
406      
407      /**
408       * A map from file names to {@link DFSOutputStream} objects
409       * that are currently being written by this client.
410       * Note that a file can only be written by a single client.
411       */
412      private final Map<String, DFSOutputStream> filesBeingWritten
413          = new HashMap<String, DFSOutputStream>();
414    
415      private final DomainSocketFactory domainSocketFactory;
416      
417      /**
418       * Same as this(NameNode.getAddress(conf), conf);
419       * @see #DFSClient(InetSocketAddress, Configuration)
420       * @deprecated Deprecated since 0.21
421       */
422      @Deprecated
423      public DFSClient(Configuration conf) throws IOException {
424        this(NameNode.getAddress(conf), conf);
425      }
426      
427      public DFSClient(InetSocketAddress address, Configuration conf) throws IOException {
428        this(NameNode.getUri(address), conf);
429      }
430    
431      /**
432       * Same as this(nameNodeUri, conf, null);
433       * @see #DFSClient(URI, Configuration, FileSystem.Statistics)
434       */
435      public DFSClient(URI nameNodeUri, Configuration conf
436          ) throws IOException {
437        this(nameNodeUri, conf, null);
438      }
439    
440      /**
441       * Same as this(nameNodeUri, null, conf, stats);
442       * @see #DFSClient(URI, ClientProtocol, Configuration, FileSystem.Statistics) 
443       */
444      public DFSClient(URI nameNodeUri, Configuration conf,
445                       FileSystem.Statistics stats)
446        throws IOException {
447        this(nameNodeUri, null, conf, stats);
448      }
449      
450      /** 
451       * Create a new DFSClient connected to the given nameNodeUri or rpcNamenode.
452       * If HA is enabled and a positive value is set for 
453       * {@link DFSConfigKeys#DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY} in the
454       * configuration, the DFSClient will use {@link LossyRetryInvocationHandler}
455       * as its RetryInvocationHandler. Otherwise one of nameNodeUri or rpcNamenode 
456       * must be null.
457       */
458      @VisibleForTesting
459      public DFSClient(URI nameNodeUri, ClientProtocol rpcNamenode,
460          Configuration conf, FileSystem.Statistics stats)
461        throws IOException {
462        // Copy only the required DFSClient configuration
463        this.dfsClientConf = new Conf(conf);
464        this.shouldUseLegacyBlockReaderLocal = 
465            this.dfsClientConf.useLegacyBlockReaderLocal;
466        if (this.dfsClientConf.useLegacyBlockReaderLocal) {
467          LOG.debug("Using legacy short-circuit local reads.");
468        }
469        this.conf = conf;
470        this.stats = stats;
471        this.socketFactory = NetUtils.getSocketFactory(conf, ClientProtocol.class);
472        this.dtpReplaceDatanodeOnFailure = ReplaceDatanodeOnFailure.get(conf);
473    
474        this.ugi = UserGroupInformation.getCurrentUser();
475        
476        this.authority = nameNodeUri == null? "null": nameNodeUri.getAuthority();
477        this.clientName = "DFSClient_" + dfsClientConf.taskId + "_" + 
478            DFSUtil.getRandom().nextInt()  + "_" + Thread.currentThread().getId();
479        
480        int numResponseToDrop = conf.getInt(
481            DFSConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY,
482            DFSConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_DEFAULT);
483        NameNodeProxies.ProxyAndInfo<ClientProtocol> proxyInfo = null;
484        if (numResponseToDrop > 0) {
485          // This case is used for testing.
486          LOG.warn(DFSConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY
487              + " is set to " + numResponseToDrop
488              + ", this hacked client will proactively drop responses");
489          proxyInfo = NameNodeProxies.createProxyWithLossyRetryHandler(conf,
490              nameNodeUri, ClientProtocol.class, numResponseToDrop);
491        }
492        
493        if (proxyInfo != null) {
494          this.dtService = proxyInfo.getDelegationTokenService();
495          this.namenode = proxyInfo.getProxy();
496        } else if (rpcNamenode != null) {
497          // This case is used for testing.
498          Preconditions.checkArgument(nameNodeUri == null);
499          this.namenode = rpcNamenode;
500          dtService = null;
501        } else {
502          Preconditions.checkArgument(nameNodeUri != null,
503              "null URI");
504          proxyInfo = NameNodeProxies.createProxy(conf, nameNodeUri,
505              ClientProtocol.class);
506          this.dtService = proxyInfo.getDelegationTokenService();
507          this.namenode = proxyInfo.getProxy();
508        }
509    
510        // read directly from the block file if configured.
511        this.domainSocketFactory = new DomainSocketFactory(dfsClientConf);
512    
513        String localInterfaces[] =
514          conf.getTrimmedStrings(DFSConfigKeys.DFS_CLIENT_LOCAL_INTERFACES);
515        localInterfaceAddrs = getLocalInterfaceAddrs(localInterfaces);
516        if (LOG.isDebugEnabled() && 0 != localInterfaces.length) {
517          LOG.debug("Using local interfaces [" +
518            Joiner.on(',').join(localInterfaces) + "] with addresses [" +
519            Joiner.on(',').join(localInterfaceAddrs) + "]");
520        }
521        
522        this.peerCache = PeerCache.getInstance(dfsClientConf.socketCacheCapacity, dfsClientConf.socketCacheExpiry);
523      }
524      
525      /**
526       * Return the socket addresses to use with each configured
527       * local interface. Local interfaces may be specified by IP
528       * address, IP address range using CIDR notation, interface
529       * name (e.g. eth0) or sub-interface name (e.g. eth0:0).
530       * The socket addresses consist of the IPs for the interfaces
531       * and the ephemeral port (port 0). If an IP, IP range, or
532       * interface name matches an interface with sub-interfaces
533       * only the IP of the interface is used. Sub-interfaces can
534       * be used by specifying them explicitly (by IP or name).
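          *
          * <p>For example (hypothetical values), setting
          * {@code dfs.client.local.interfaces = "eth0,10.10.0.0/16,192.168.1.5"}
          * yields ephemeral-port socket addresses for the IPs of eth0, for every
          * local address that falls within 10.10.0.0/16, and for 192.168.1.5.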
535       * 
536       * @return SocketAddresses for the configured local interfaces,
537       *    or an empty array if none are configured
538       * @throws UnknownHostException if a given interface name is invalid
539       */
540      private static SocketAddress[] getLocalInterfaceAddrs(
541          String interfaceNames[]) throws UnknownHostException {
542        List<SocketAddress> localAddrs = new ArrayList<SocketAddress>();
543        for (String interfaceName : interfaceNames) {
544          if (InetAddresses.isInetAddress(interfaceName)) {
545            localAddrs.add(new InetSocketAddress(interfaceName, 0));
546          } else if (NetUtils.isValidSubnet(interfaceName)) {
547            for (InetAddress addr : NetUtils.getIPs(interfaceName, false)) {
548              localAddrs.add(new InetSocketAddress(addr, 0));
549            }
550          } else {
551            for (String ip : DNS.getIPs(interfaceName, false)) {
552              localAddrs.add(new InetSocketAddress(ip, 0));
553            }
554          }
555        }
556        return localAddrs.toArray(new SocketAddress[localAddrs.size()]);
557      }
558    
559      /**
560       * Select one of the configured local interfaces at random. We use a random
561       * interface because other policies like round-robin are less effective
562       * given that we cache connections to datanodes.
563       *
564       * @return one of the local interface addresses at random, or null if no
565       *    local interfaces are configured
566       */
567      SocketAddress getRandomLocalInterfaceAddr() {
568        if (localInterfaceAddrs.length == 0) {
569          return null;
570        }
571        final int idx = r.nextInt(localInterfaceAddrs.length);
572        final SocketAddress addr = localInterfaceAddrs[idx];
573        if (LOG.isDebugEnabled()) {
574          LOG.debug("Using local interface " + addr);
575        }
576        return addr;
577      }
578    
579      /**
580       * Return the number of times the client should go back to the namenode
581       * to retrieve block locations when reading.
582       */
583      int getMaxBlockAcquireFailures() {
584        return dfsClientConf.maxBlockAcquireFailures;
585      }
586    
587      /**
588       * Return the timeout that clients should use when writing to datanodes.
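          * <p>Illustrative arithmetic (example numbers only): with a configured base
          * write timeout of 480000 ms and a per-node extension of 5000 ms, a 3-node
          * pipeline gets 480000 + 5000 * 3 = 495000 ms; a non-positive base simply
          * yields 0.
          *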
589       * @param numNodes the number of nodes in the pipeline.
590       */
591      int getDatanodeWriteTimeout(int numNodes) {
592        return (dfsClientConf.confTime > 0) ?
593          (dfsClientConf.confTime + HdfsServerConstants.WRITE_TIMEOUT_EXTENSION * numNodes) : 0;
594      }
595    
596      int getDatanodeReadTimeout(int numNodes) {
597        return dfsClientConf.socketTimeout > 0 ?
598            (HdfsServerConstants.READ_TIMEOUT_EXTENSION * numNodes +
599                dfsClientConf.socketTimeout) : 0;
600      }
601      
602      int getHdfsTimeout() {
603        return dfsClientConf.hdfsTimeout;
604      }
605      
606      @VisibleForTesting
607      public String getClientName() {
608        return clientName;
609      }
610    
611      void checkOpen() throws IOException {
612        if (!clientRunning) {
613          throw new IOException("Filesystem closed");
615        }
616      }
617    
618      /** Return the lease renewer instance. The renewer thread won't start
619       *  until the first output stream is created. The same instance will
620       *  be returned until all output streams are closed.
621       */
622      public LeaseRenewer getLeaseRenewer() throws IOException {
623        return LeaseRenewer.getInstance(authority, ugi, this);
624      }
625    
626      /** Get a lease and start automatic renewal */
627      private void beginFileLease(final String src, final DFSOutputStream out) 
628          throws IOException {
629        getLeaseRenewer().put(src, out, this);
630      }
631    
632      /** Stop renewal of lease for the file. */
633      void endFileLease(final String src) throws IOException {
634        getLeaseRenewer().closeFile(src, this);
635      }
636        
637    
638      /** Put a file into the files-being-written map. Only called from LeaseRenewer,
639       *  where proper locking is enforced to consistently update its local dfsclients
640       *  array and the client's filesBeingWritten map.
641       */
642      void putFileBeingWritten(final String src, final DFSOutputStream out) {
643        synchronized(filesBeingWritten) {
644          filesBeingWritten.put(src, out);
645          // update the last lease renewal time only when there were no
646          // writes. once there is one write stream open, the lease renewer
647          // thread keeps it updated well within anyone's expiration time.
648          if (lastLeaseRenewal == 0) {
649            updateLastLeaseRenewal();
650          }
651        }
652      }
653    
654      /** Remove a file from the files-being-written map. Only called from LeaseRenewer. */
655      void removeFileBeingWritten(final String src) {
656        synchronized(filesBeingWritten) {
657          filesBeingWritten.remove(src);
658          if (filesBeingWritten.isEmpty()) {
659            lastLeaseRenewal = 0;
660          }
661        }
662      }
663    
664      /** Is file-being-written map empty? */
665      boolean isFilesBeingWrittenEmpty() {
666        synchronized(filesBeingWritten) {
667          return filesBeingWritten.isEmpty();
668        }
669      }
670      
671      /** @return true if the client is running */
672      boolean isClientRunning() {
673        return clientRunning;
674      }
675    
676      long getLastLeaseRenewal() {
677        return lastLeaseRenewal;
678      }
679    
680      void updateLastLeaseRenewal() {
681        synchronized(filesBeingWritten) {
682          if (filesBeingWritten.isEmpty()) {
683            return;
684          }
685          lastLeaseRenewal = Time.now();
686        }
687      }
688    
689      /**
690       * Renew leases.
691       * @return true if lease was renewed. May return false if this
692       * client has been closed or has no files open.
693       **/
694      boolean renewLease() throws IOException {
695        if (clientRunning && !isFilesBeingWrittenEmpty()) {
696          try {
697            namenode.renewLease(clientName);
698            updateLastLeaseRenewal();
699            return true;
700          } catch (IOException e) {
701            // Abort if the lease has already expired. 
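               // (LEASE_HARDLIMIT_PERIOD is one hour: e.g. if the last successful
               // renewal was 90 minutes ago, every file being written is force-closed.)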
702            final long elapsed = Time.now() - getLastLeaseRenewal();
703            if (elapsed > HdfsConstants.LEASE_HARDLIMIT_PERIOD) {
704              LOG.warn("Failed to renew lease for " + clientName + " for "
705                  + (elapsed/1000) + " seconds (> hard-limit ="
706                  + (HdfsConstants.LEASE_HARDLIMIT_PERIOD/1000) + " seconds.) "
707                  + "Closing all files being written ...", e);
708              closeAllFilesBeingWritten(true);
709            } else {
710              // Let the lease renewer handle it and retry.
711              throw e;
712            }
713          }
714        }
715        return false;
716      }
717      
718      /**
719       * Close connections to the NameNode.
720       */
721      void closeConnectionToNamenode() {
722        RPC.stopProxy(namenode);
723      }
724      
725      /** Abort and release resources held.  Ignore all errors. */
726      void abort() {
727        clientRunning = false;
728        closeAllFilesBeingWritten(true);
729    
730        try {
731          // remove reference to this client and stop the renewer,
732        // if there are no more clients under the renewer.
733          getLeaseRenewer().closeClient(this);
734        } catch (IOException ioe) {
735          LOG.info("Exception occurred while aborting the client: " + ioe);
736        }
737        closeConnectionToNamenode();
738      }
739    
740      /** Close/abort all files being written. */
741      private void closeAllFilesBeingWritten(final boolean abort) {
742        for(;;) {
743          final String src;
744          final DFSOutputStream out;
745          synchronized(filesBeingWritten) {
746            if (filesBeingWritten.isEmpty()) {
747              return;
748            }
749            src = filesBeingWritten.keySet().iterator().next();
750            out = filesBeingWritten.remove(src);
751          }
752          if (out != null) {
753            try {
754              if (abort) {
755                out.abort();
756              } else {
757                out.close();
758              }
759            } catch(IOException ie) {
760              LOG.error("Failed to " + (abort? "abort": "close") + " file " + src,
761                  ie);
762            }
763          }
764        }
765      }
766    
767      /**
768       * Close the file system, abandoning all of the leases and files being
769       * created, and close connections to the namenode.
770       */
771      @Override
772      public synchronized void close() throws IOException {
773        if(clientRunning) {
774          closeAllFilesBeingWritten(false);
775          clientRunning = false;
776          getLeaseRenewer().closeClient(this);
777          // close connections to the namenode
778          closeConnectionToNamenode();
779        }
780      }
781    
782      /**
783       * Get the default block size for this cluster
784       * @return the default block size in bytes
785       */
786      public long getDefaultBlockSize() {
787        return dfsClientConf.defaultBlockSize;
788      }
789        
790      /**
791       * @see ClientProtocol#getPreferredBlockSize(String)
792       */
793      public long getBlockSize(String f) throws IOException {
794        try {
795          return namenode.getPreferredBlockSize(f);
796        } catch (IOException ie) {
797          LOG.warn("Problem getting block size", ie);
798          throw ie;
799        }
800      }
801    
802      /**
803       * Get server default values for a number of configuration params.
804       * @see ClientProtocol#getServerDefaults()
805       */
806      public FsServerDefaults getServerDefaults() throws IOException {
807        long now = Time.now();
808        if (now - serverDefaultsLastUpdate > SERVER_DEFAULTS_VALIDITY_PERIOD) {
809          serverDefaults = namenode.getServerDefaults();
810          serverDefaultsLastUpdate = now;
811        }
812        return serverDefaults;
813      }
814      
815      /**
816       * Get a canonical token service name for this client's tokens.  Null should
817       * be returned if the client is not using tokens.
818       * @return the token service for the client
819       */
820      @InterfaceAudience.LimitedPrivate( { "HDFS" }) 
821      public String getCanonicalServiceName() {
822        return (dtService != null) ? dtService.toString() : null;
823      }
824      
825      /**
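          * <p>A brief usage sketch (assuming {@code dfsClient} is an open client;
          * the renewer name below is a hypothetical example):
          * <pre>{@code
          * Token<DelegationTokenIdentifier> t =
          *     dfsClient.getDelegationToken(new Text("yarn"));
          * }</pre>
          *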
826       * @see ClientProtocol#getDelegationToken(Text)
827       */
828      public Token<DelegationTokenIdentifier> getDelegationToken(Text renewer)
829          throws IOException {
830        assert dtService != null;
831        Token<DelegationTokenIdentifier> token =
832          namenode.getDelegationToken(renewer);
833        token.setService(this.dtService);
834    
835        LOG.info("Created " + DelegationTokenIdentifier.stringifyToken(token));
836        return token;
837      }
838    
839      /**
840       * Renew a delegation token
841       * @param token the token to renew
842       * @return the new expiration time
843       * @throws InvalidToken
844       * @throws IOException
845       * @deprecated Use Token.renew instead.
846       */
847      @Deprecated
848      public long renewDelegationToken(Token<DelegationTokenIdentifier> token)
849          throws InvalidToken, IOException {
850        LOG.info("Renewing " + DelegationTokenIdentifier.stringifyToken(token));
851        try {
852          return token.renew(conf);
853        } catch (InterruptedException ie) {                                       
854          throw new RuntimeException("caught interrupted", ie);
855        } catch (RemoteException re) {
856          throw re.unwrapRemoteException(InvalidToken.class,
857                                         AccessControlException.class);
858        }
859      }
860      
861      private static Map<String, Boolean> localAddrMap = Collections
862          .synchronizedMap(new HashMap<String, Boolean>());
863      
864      static boolean isLocalAddress(InetSocketAddress targetAddr) {
865        InetAddress addr = targetAddr.getAddress();
866        Boolean cached = localAddrMap.get(addr.getHostAddress());
867        if (cached != null) {
868          if (LOG.isTraceEnabled()) {
869            LOG.trace("Address " + targetAddr +
870                      (cached ? " is local" : " is not local"));
871          }
872          return cached;
873        }
874        
875        boolean local = NetUtils.isLocalAddress(addr);
876    
877        if (LOG.isTraceEnabled()) {
878          LOG.trace("Address " + targetAddr +
879                    (local ? " is local" : " is not local"));
880        }
881        localAddrMap.put(addr.getHostAddress(), local);
882        return local;
883      }
884      
885      /**
886       * Should the block access token be refetched on an exception?
887       *
888       * @param ex Exception received
889       * @param targetAddr Target datanode address from which the exception was received
890       * @return true if the block access token has expired or is invalid and should be
891       *         refetched
892       */
893      private static boolean tokenRefetchNeeded(IOException ex,
894          InetSocketAddress targetAddr) {
895        /*
896         * Get a new access token and retry. A retry is needed in two cases: 1) when
897         * both the NN and DN restarted while the DFSClient was holding a cached
898         * access token; 2) when the NN fails to update its access key at the pre-set
899         * interval (by a wide margin) and subsequently restarts. In that case the DN
900         * re-registers itself with the NN and receives a new access key, but the DN
901         * deletes the old access key from its memory since it is considered expired
902         * based on the estimated expiration date.
903         */
904        if (ex instanceof InvalidBlockTokenException || ex instanceof InvalidToken) {
905          LOG.info("Access token was invalid when connecting to " + targetAddr
906              + " : " + ex);
907          return true;
908        }
909        return false;
910      }
911      
912      /**
913       * Cancel a delegation token
914       * @param token the token to cancel
915       * @throws InvalidToken
916       * @throws IOException
917       * @deprecated Use Token.cancel instead.
918       */
919      @Deprecated
920      public void cancelDelegationToken(Token<DelegationTokenIdentifier> token)
921          throws InvalidToken, IOException {
922        LOG.info("Cancelling " + DelegationTokenIdentifier.stringifyToken(token));
923        try {
924          token.cancel(conf);
925        } catch (InterruptedException ie) {
926          throw new RuntimeException("caught interrupted", ie);
927        } catch (RemoteException re) {
928          throw re.unwrapRemoteException(InvalidToken.class,
929                                         AccessControlException.class);
930        }
931      }
932      
933      @InterfaceAudience.Private
934      public static class Renewer extends TokenRenewer {
935        
936        static {
937          // Ensure that HDFS configuration files are loaded before trying to use
938          // the renewer.
939          HdfsConfiguration.init();
940        }
941        
942        @Override
943        public boolean handleKind(Text kind) {
944          return DelegationTokenIdentifier.HDFS_DELEGATION_KIND.equals(kind);
945        }
946    
947        @SuppressWarnings("unchecked")
948        @Override
949        public long renew(Token<?> token, Configuration conf) throws IOException {
950          Token<DelegationTokenIdentifier> delToken = 
951            (Token<DelegationTokenIdentifier>) token;
952          ClientProtocol nn = getNNProxy(delToken, conf);
953          try {
954            return nn.renewDelegationToken(delToken);
955          } catch (RemoteException re) {
956            throw re.unwrapRemoteException(InvalidToken.class, 
957                                           AccessControlException.class);
958          }
959        }
960    
961        @SuppressWarnings("unchecked")
962        @Override
963        public void cancel(Token<?> token, Configuration conf) throws IOException {
964          Token<DelegationTokenIdentifier> delToken = 
965              (Token<DelegationTokenIdentifier>) token;
966          LOG.info("Cancelling " + 
967                   DelegationTokenIdentifier.stringifyToken(delToken));
968          ClientProtocol nn = getNNProxy(delToken, conf);
969          try {
970            nn.cancelDelegationToken(delToken);
971          } catch (RemoteException re) {
972            throw re.unwrapRemoteException(InvalidToken.class,
973                AccessControlException.class);
974          }
975        }
976        
977        private static ClientProtocol getNNProxy(
978            Token<DelegationTokenIdentifier> token, Configuration conf)
979            throws IOException {
980          URI uri = HAUtil.getServiceUriFromToken(token);
981          if (HAUtil.isTokenForLogicalUri(token) &&
982              !HAUtil.isLogicalUri(conf, uri)) {
983            // If the token is for a logical nameservice, but the configuration
984            // we have disagrees about that, we can't actually renew it.
985            // This can be the case in MR, for example, if the RM doesn't
986            // have all of the HA clusters configured in its configuration.
987            throw new IOException("Unable to map logical nameservice URI '" +
988                uri + "' to a NameNode. Local configuration does not have " +
989                "a failover proxy provider configured.");
990          }
991          
992          NameNodeProxies.ProxyAndInfo<ClientProtocol> info =
993            NameNodeProxies.createProxy(conf, uri, ClientProtocol.class);
994          assert info.getDelegationTokenService().equals(token.getService()) :
995            "Returned service '" + info.getDelegationTokenService().toString() +
996            "' doesn't match expected service '" +
997            token.getService().toString() + "'";
998            
999          return info.getProxy();
1000        }
1001    
1002        @Override
1003        public boolean isManaged(Token<?> token) throws IOException {
1004          return true;
1005        }
1006        
1007      }
1008    
1009      /**
1010       * Report corrupt blocks that were discovered by the client.
1011       * @see ClientProtocol#reportBadBlocks(LocatedBlock[])
1012       */
1013      public void reportBadBlocks(LocatedBlock[] blocks) throws IOException {
1014        namenode.reportBadBlocks(blocks);
1015      }
1016      
1017      public short getDefaultReplication() {
1018        return dfsClientConf.defaultReplication;
1019      }
1020      
1021      public LocatedBlocks getLocatedBlocks(String src, long start)
1022          throws IOException {
1023        return getLocatedBlocks(src, start, dfsClientConf.prefetchSize);
1024      }
1025    
1026      /*
1027       * This is just a wrapper around callGetBlockLocations, but non-static so that
1028       * we can stub it out for tests.
1029       */
1030      @VisibleForTesting
1031      public LocatedBlocks getLocatedBlocks(String src, long start, long length)
1032          throws IOException {
1033        return callGetBlockLocations(namenode, src, start, length);
1034      }
1035    
1036      /**
1037       * @see ClientProtocol#getBlockLocations(String, long, long)
1038       */
1039      static LocatedBlocks callGetBlockLocations(ClientProtocol namenode,
1040          String src, long start, long length) 
1041          throws IOException {
1042        try {
1043          return namenode.getBlockLocations(src, start, length);
1044        } catch(RemoteException re) {
1045          throw re.unwrapRemoteException(AccessControlException.class,
1046                                         FileNotFoundException.class,
1047                                         UnresolvedPathException.class);
1048        }
1049      }
1050    
1051      /**
1052       * Recover a file's lease
1053       * @param src a file's path
1054       * @return true if the file is already closed
1055       * @throws IOException
1056       */
1057      boolean recoverLease(String src) throws IOException {
1058        checkOpen();
1059    
1060        try {
1061          return namenode.recoverLease(src, clientName);
1062        } catch (RemoteException re) {
1063          throw re.unwrapRemoteException(FileNotFoundException.class,
1064                                         AccessControlException.class,
1065                                         UnresolvedPathException.class);
1066        }
1067      }
1068    
1069      /**
1070       * Get block location info about a file
1071       * 
1072       * getBlockLocations() returns a list of hostnames that store 
1073       * data for a specific file region.  It returns a set of hostnames
1074       * for every block within the indicated region.
1075       *
1076       * This function is very useful when writing code that considers
1077       * data-placement when performing operations.  For example, the
1078       * MapReduce system tries to schedule tasks on the same machines
1079       * as the data-block the task processes. 
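           *
           * <p>A minimal sketch (assuming {@code dfsClient} is an open client; the
           * path and byte range are hypothetical examples):
           * <pre>{@code
           * BlockLocation[] locs =
           *     dfsClient.getBlockLocations("/user/alice/data.log", 0, 1024 * 1024);
           * for (BlockLocation loc : locs) {
           *   System.out.println(loc);
           * }
           * }</pre>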
1080       */
1081      public BlockLocation[] getBlockLocations(String src, long start, 
1082        long length) throws IOException, UnresolvedLinkException {
1083        LocatedBlocks blocks = getLocatedBlocks(src, start, length);
1084        BlockLocation[] locations =  DFSUtil.locatedBlocks2Locations(blocks);
1085        HdfsBlockLocation[] hdfsLocations = new HdfsBlockLocation[locations.length];
1086        for (int i = 0; i < locations.length; i++) {
1087          hdfsLocations[i] = new HdfsBlockLocation(locations[i], blocks.get(i));
1088        }
1089        return hdfsLocations;
1090      }
1091      
1092      /**
1093       * Get block location information about a list of {@link HdfsBlockLocation}.
1094       * Used by {@link DistributedFileSystem#getFileBlockStorageLocations(List)} to
1095       * get {@link BlockStorageLocation}s for blocks returned by
1096       * {@link DistributedFileSystem#getFileBlockLocations(org.apache.hadoop.fs.FileStatus, long, long)}
1097       * .
1098       * 
1099       * This is done by making a round of RPCs to the associated datanodes, asking
1100       * for the volume of each block replica. The returned array of
1101       * {@link BlockStorageLocation} exposes this information as a
1102       * {@link VolumeId}.
1103       * 
1104       * @param blockLocations
1105       *          target blocks on which to query volume location information
1106       * @return volumeBlockLocations original block array augmented with additional
1107       *         volume location information for each replica.
1108       */
1109      public BlockStorageLocation[] getBlockStorageLocations(
1110          List<BlockLocation> blockLocations) throws IOException,
1111          UnsupportedOperationException, InvalidBlockTokenException {
1112        if (!getConf().getHdfsBlocksMetadataEnabled) {
1113          throw new UnsupportedOperationException("Datanode-side support for " +
1114              "getVolumeBlockLocations() must also be enabled in the client " +
1115              "configuration.");
1116        }
1117        // Downcast blockLocations and extract the required LocatedBlocks
1118        List<LocatedBlock> blocks = new ArrayList<LocatedBlock>();
1119        for (BlockLocation loc : blockLocations) {
1120          if (!(loc instanceof HdfsBlockLocation)) {
1121            throw new ClassCastException("DFSClient#getVolumeBlockLocations " +
1122                "expected to be passed HdfsBlockLocations");
1123          }
1124          HdfsBlockLocation hdfsLoc = (HdfsBlockLocation) loc;
1125          blocks.add(hdfsLoc.getLocatedBlock());
1126        }
1127        
1128        // Regroup the LocatedBlocks by datanode, with each value being the list
1129        // of LocatedBlocks stored on that datanode.
1130        Map<DatanodeInfo, List<LocatedBlock>> datanodeBlocks = 
1131            new LinkedHashMap<DatanodeInfo, List<LocatedBlock>>();
1132        for (LocatedBlock b : blocks) {
1133          for (DatanodeInfo info : b.getLocations()) {
1134            if (!datanodeBlocks.containsKey(info)) {
1135              datanodeBlocks.put(info, new ArrayList<LocatedBlock>());
1136            }
1137            List<LocatedBlock> l = datanodeBlocks.get(info);
1138            l.add(b);
1139          }
1140        }
1141            
1142        // Make RPCs to the datanodes to get volume locations for their replicas
1143        List<HdfsBlocksMetadata> metadatas = BlockStorageLocationUtil
1144            .queryDatanodesForHdfsBlocksMetadata(conf, datanodeBlocks,
1145                getConf().getFileBlockStorageLocationsNumThreads,
1146                getConf().getFileBlockStorageLocationsTimeout,
1147                getConf().connectToDnViaHostname);
1148        
1149        // Regroup the returned VolumeId metadata to again be grouped by
1150        // LocatedBlock rather than by datanode
1151        Map<LocatedBlock, List<VolumeId>> blockVolumeIds = BlockStorageLocationUtil
1152            .associateVolumeIdsWithBlocks(blocks, datanodeBlocks, metadatas);
1153        
1154        // Combine original BlockLocations with new VolumeId information
1155        BlockStorageLocation[] volumeBlockLocations = BlockStorageLocationUtil
1156            .convertToVolumeBlockLocations(blocks, blockVolumeIds);
1157    
1158        return volumeBlockLocations;
1159      }
1160      
1161      public DFSInputStream open(String src) 
1162          throws IOException, UnresolvedLinkException {
1163        return open(src, dfsClientConf.ioBufferSize, true, null);
1164      }
1165    
1166      /**
1167       * Create an input stream that obtains a nodelist from the
1168       * namenode, and then reads from all the right places.  Creates an
1169       * inner subclass of InputStream that does the right out-of-band
1170       * work.
1171       * @deprecated Use {@link #open(String, int, boolean)} instead.
1172       */
1173      @Deprecated
1174      public DFSInputStream open(String src, int buffersize, boolean verifyChecksum,
1175                                 FileSystem.Statistics stats)
1176          throws IOException, UnresolvedLinkException {
1177        return open(src, buffersize, verifyChecksum);
1178      }
1179      
1180    
1181      /**
1182       * Create an input stream that obtains a nodelist from the
1183       * namenode, and then reads from all the right places.  Creates an
1184       * inner subclass of InputStream that does the right out-of-band
1185       * work.
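           *
           * <p>A short sketch (assuming {@code dfsClient} is an open client; the path
           * and buffer size are hypothetical examples):
           * <pre>{@code
           * DFSInputStream in = dfsClient.open("/tmp/example.txt", 4096, true);
           * int firstByte = in.read();
           * in.close();
           * }</pre>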
1186       */
1187      public DFSInputStream open(String src, int buffersize, boolean verifyChecksum)
1188          throws IOException, UnresolvedLinkException {
1189        checkOpen();
1190        // Get block info from namenode
1191        return new DFSInputStream(this, src, buffersize, verifyChecksum);
1192      }
1193    
1194      /**
1195       * Get the namenode associated with this DFSClient object
1196       * @return the namenode associated with this DFSClient object
1197       */
1198      public ClientProtocol getNamenode() {
1199        return namenode;
1200      }
1201      
1202      /**
1203       * Call {@link #create(String, boolean, short, long, Progressable)} with
1204       * default <code>replication</code> and <code>blockSize</code> and null <code>
1205       * progress</code>.
1206       */
1207      public OutputStream create(String src, boolean overwrite) 
1208          throws IOException {
1209        return create(src, overwrite, dfsClientConf.defaultReplication,
1210            dfsClientConf.defaultBlockSize, null);
1211      }
1212        
1213      /**
1214       * Call {@link #create(String, boolean, short, long, Progressable)} with
   * default <code>replication</code> and <code>blockSize</code>.
1216       */
1217      public OutputStream create(String src, 
1218                                 boolean overwrite,
1219                                 Progressable progress) throws IOException {
1220        return create(src, overwrite, dfsClientConf.defaultReplication,
1221            dfsClientConf.defaultBlockSize, progress);
1222      }
1223        
1224      /**
1225       * Call {@link #create(String, boolean, short, long, Progressable)} with
1226       * null <code>progress</code>.
1227       */
1228      public OutputStream create(String src, 
1229                                 boolean overwrite, 
1230                                 short replication,
1231                                 long blockSize) throws IOException {
1232        return create(src, overwrite, replication, blockSize, null);
1233      }
1234    
1235      /**
1236       * Call {@link #create(String, boolean, short, long, Progressable, int)}
1237       * with default bufferSize.
1238       */
1239      public OutputStream create(String src, boolean overwrite, short replication,
1240          long blockSize, Progressable progress) throws IOException {
1241        return create(src, overwrite, replication, blockSize, progress,
1242            dfsClientConf.ioBufferSize);
1243      }
1244    
1245      /**
1246       * Call {@link #create(String, FsPermission, EnumSet, short, long, 
1247       * Progressable, int, ChecksumOpt)} with default <code>permission</code>
1248       * {@link FsPermission#getFileDefault()}.
1249       * 
1250       * @param src File name
1251       * @param overwrite overwrite an existing file if true
1252       * @param replication replication factor for the file
1253       * @param blockSize maximum block size
1254       * @param progress interface for reporting client progress
1255       * @param buffersize underlying buffersize
1256       * 
1257       * @return output stream
1258       */
1259      public OutputStream create(String src,
1260                                 boolean overwrite,
1261                                 short replication,
1262                                 long blockSize,
1263                                 Progressable progress,
1264                                 int buffersize)
1265          throws IOException {
1266        return create(src, FsPermission.getFileDefault(),
1267            overwrite ? EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE)
1268                : EnumSet.of(CreateFlag.CREATE), replication, blockSize, progress,
1269            buffersize, null);
1270      }
1271    
1272      /**
1273       * Call {@link #create(String, FsPermission, EnumSet, boolean, short, 
1274       * long, Progressable, int, ChecksumOpt)} with <code>createParent</code>
1275       *  set to true.
1276       */
1277      public DFSOutputStream create(String src, 
1278                                 FsPermission permission,
1279                                 EnumSet<CreateFlag> flag, 
1280                                 short replication,
1281                                 long blockSize,
1282                                 Progressable progress,
1283                                 int buffersize,
1284                                 ChecksumOpt checksumOpt)
1285          throws IOException {
1286        return create(src, permission, flag, true,
1287            replication, blockSize, progress, buffersize, checksumOpt, null);
1288      }
1289    
1290      /**
1291       * Create a new dfs file with the specified block replication 
1292       * with write-progress reporting and return an output stream for writing
1293       * into the file.  
1294       * 
1295       * @param src File name
   * @param permission The permission of the file being created.
1297       *          If null, use default permission {@link FsPermission#getFileDefault()}
1298       * @param flag indicates create a new file or create/overwrite an
1299       *          existing file or append to an existing file
1300       * @param createParent create missing parent directory if true
1301       * @param replication block replication
1302       * @param blockSize maximum block size
1303       * @param progress interface for reporting client progress
1304       * @param buffersize underlying buffer size 
1305       * @param checksumOpt checksum options
1306       * 
1307       * @return output stream
1308       * 
1309       * @see ClientProtocol#create(String, FsPermission, String, EnumSetWritable,
1310       * boolean, short, long) for detailed description of exceptions thrown
1311       */
1312      public DFSOutputStream create(String src, 
1313                                 FsPermission permission,
1314                                 EnumSet<CreateFlag> flag, 
1315                                 boolean createParent,
1316                                 short replication,
1317                                 long blockSize,
1318                                 Progressable progress,
1319                                 int buffersize,
1320                                 ChecksumOpt checksumOpt) throws IOException {
1321        return create(src, permission, flag, createParent, replication, blockSize, 
1322            progress, buffersize, checksumOpt, null);
1323      }
1324    
1325      /**
1326       * Same as {@link #create(String, FsPermission, EnumSet, boolean, short, long,
1327       * Progressable, int, ChecksumOpt)} with the addition of favoredNodes that is
1328       * a hint to where the namenode should place the file blocks.
   * The favored nodes hint is not persisted in HDFS, so it may be honored
   * only at creation time; HDFS may later move blocks away from the favored
   * nodes during balancing or re-replication. A value of null means no
   * favored nodes for this create.
1333       */
1334      public DFSOutputStream create(String src, 
1335                                 FsPermission permission,
1336                                 EnumSet<CreateFlag> flag, 
1337                                 boolean createParent,
1338                                 short replication,
1339                                 long blockSize,
1340                                 Progressable progress,
1341                                 int buffersize,
1342                                 ChecksumOpt checksumOpt,
1343                                 InetSocketAddress[] favoredNodes) throws IOException {
1344        checkOpen();
1345        if (permission == null) {
1346          permission = FsPermission.getFileDefault();
1347        }
1348        FsPermission masked = permission.applyUMask(dfsClientConf.uMask);
1349        if(LOG.isDebugEnabled()) {
1350          LOG.debug(src + ": masked=" + masked);
1351        }
1352        String[] favoredNodeStrs = null;
1353        if (favoredNodes != null) {
1354          favoredNodeStrs = new String[favoredNodes.length];
1355          for (int i = 0; i < favoredNodes.length; i++) {
1356            favoredNodeStrs[i] = 
1357                favoredNodes[i].getHostName() + ":" 
1358                             + favoredNodes[i].getPort();
1359          }
1360        }
1361        final DFSOutputStream result = DFSOutputStream.newStreamForCreate(this,
1362            src, masked, flag, createParent, replication, blockSize, progress,
1363            buffersize, dfsClientConf.createChecksum(checksumOpt), favoredNodeStrs);
1364        beginFileLease(src, result);
1365        return result;
1366      }
1367      
1368      /**
1369       * Append to an existing file if {@link CreateFlag#APPEND} is present
1370       */
1371      private DFSOutputStream primitiveAppend(String src, EnumSet<CreateFlag> flag,
1372          int buffersize, Progressable progress) throws IOException {
1373        if (flag.contains(CreateFlag.APPEND)) {
1374          HdfsFileStatus stat = getFileInfo(src);
1375          if (stat == null) { // No file to append to
1376            // New file needs to be created if create option is present
1377            if (!flag.contains(CreateFlag.CREATE)) {
1378              throw new FileNotFoundException("failed to append to non-existent file "
1379                  + src + " on client " + clientName);
1380            }
1381            return null;
1382          }
1383          return callAppend(stat, src, buffersize, progress);
1384        }
1385        return null;
1386      }
1387      
1388      /**
   * Same as {@link #create(String, FsPermission, EnumSet, short, long,
   *  Progressable, int, ChecksumOpt)} except that the permission
   *  is absolute (i.e. it has already been masked with the umask).
1392       */
1393      public DFSOutputStream primitiveCreate(String src, 
1394                                 FsPermission absPermission,
1395                                 EnumSet<CreateFlag> flag,
1396                                 boolean createParent,
1397                                 short replication,
1398                                 long blockSize,
1399                                 Progressable progress,
1400                                 int buffersize,
1401                                 ChecksumOpt checksumOpt)
1402          throws IOException, UnresolvedLinkException {
1403        checkOpen();
1404        CreateFlag.validate(flag);
1405        DFSOutputStream result = primitiveAppend(src, flag, buffersize, progress);
1406        if (result == null) {
1407          DataChecksum checksum = dfsClientConf.createChecksum(checksumOpt);
1408          result = DFSOutputStream.newStreamForCreate(this, src, absPermission,
1409              flag, createParent, replication, blockSize, progress, buffersize,
1410              checksum);
1411        }
1412        beginFileLease(src, result);
1413        return result;
1414      }
1415      
1416      /**
1417       * Creates a symbolic link.
1418       * 
1419       * @see ClientProtocol#createSymlink(String, String,FsPermission, boolean) 
1420       */
1421      public void createSymlink(String target, String link, boolean createParent)
1422          throws IOException {
1423        try {
1424          FsPermission dirPerm = 
1425              FsPermission.getDefault().applyUMask(dfsClientConf.uMask); 
1426          namenode.createSymlink(target, link, dirPerm, createParent);
1427        } catch (RemoteException re) {
1428          throw re.unwrapRemoteException(AccessControlException.class,
1429                                         FileAlreadyExistsException.class, 
1430                                         FileNotFoundException.class,
1431                                         ParentNotDirectoryException.class,
1432                                         NSQuotaExceededException.class, 
1433                                         DSQuotaExceededException.class,
1434                                         UnresolvedPathException.class,
1435                                         SnapshotAccessControlException.class);
1436        }
1437      }
1438    
1439      /**
1440       * Resolve the *first* symlink, if any, in the path.
1441       * 
1442       * @see ClientProtocol#getLinkTarget(String)
1443       */
1444      public String getLinkTarget(String path) throws IOException { 
1445        checkOpen();
1446        try {
1447          return namenode.getLinkTarget(path);
1448        } catch (RemoteException re) {
1449          throw re.unwrapRemoteException(AccessControlException.class,
1450                                         FileNotFoundException.class);
1451        }
1452      }
1453    
1454      /** Method to get stream returned by append call */
1455      private DFSOutputStream callAppend(HdfsFileStatus stat, String src,
1456          int buffersize, Progressable progress) throws IOException {
1457        LocatedBlock lastBlock = null;
1458        try {
1459          lastBlock = namenode.append(src, clientName);
1460        } catch(RemoteException re) {
1461          throw re.unwrapRemoteException(AccessControlException.class,
1462                                         FileNotFoundException.class,
1463                                         SafeModeException.class,
1464                                         DSQuotaExceededException.class,
1465                                         UnsupportedOperationException.class,
1466                                         UnresolvedPathException.class,
1467                                         SnapshotAccessControlException.class);
1468        }
1469        return DFSOutputStream.newStreamForAppend(this, src, buffersize, progress,
1470            lastBlock, stat, dfsClientConf.createChecksum());
1471      }
1472      
1473      /**
1474       * Append to an existing HDFS file.  
1475       * 
1476       * @param src file name
1477       * @param buffersize buffer size
1478       * @param progress for reporting write-progress; null is acceptable.
1479       * @param statistics file system statistics; null is acceptable.
1480       * @return an output stream for writing into the file
1481       * 
1482       * @see ClientProtocol#append(String, String) 
1483       */
1484      public HdfsDataOutputStream append(final String src, final int buffersize,
1485          final Progressable progress, final FileSystem.Statistics statistics
1486          ) throws IOException {
1487        final DFSOutputStream out = append(src, buffersize, progress);
1488        return new HdfsDataOutputStream(out, statistics, out.getInitialLen());
1489      }
1490    
1491      private DFSOutputStream append(String src, int buffersize, Progressable progress) 
1492          throws IOException {
1493        checkOpen();
1494        HdfsFileStatus stat = getFileInfo(src);
1495        if (stat == null) { // No file found
1496          throw new FileNotFoundException("failed to append to non-existent file "
1497              + src + " on client " + clientName);
1498        }
1499        final DFSOutputStream result = callAppend(stat, src, buffersize, progress);
1500        beginFileLease(src, result);
1501        return result;
1502      }
1503    
1504      /**
1505       * Set replication for an existing file.
1506       * @param src file name
   * @param replication the new replication factor
1508       * 
1509       * @see ClientProtocol#setReplication(String, short)
1510       */
1511      public boolean setReplication(String src, short replication)
1512          throws IOException {
1513        try {
1514          return namenode.setReplication(src, replication);
1515        } catch(RemoteException re) {
1516          throw re.unwrapRemoteException(AccessControlException.class,
1517                                         FileNotFoundException.class,
1518                                         SafeModeException.class,
1519                                         DSQuotaExceededException.class,
1520                                         UnresolvedPathException.class,
1521                                         SnapshotAccessControlException.class);
1522        }
1523      }
1524    
1525      /**
1526       * Rename file or directory.
1527       * @see ClientProtocol#rename(String, String)
1528       * @deprecated Use {@link #rename(String, String, Options.Rename...)} instead.
1529       */
1530      @Deprecated
1531      public boolean rename(String src, String dst) throws IOException {
1532        checkOpen();
1533        try {
1534          return namenode.rename(src, dst);
1535        } catch(RemoteException re) {
1536          throw re.unwrapRemoteException(AccessControlException.class,
1537                                         NSQuotaExceededException.class,
1538                                         DSQuotaExceededException.class,
1539                                         UnresolvedPathException.class,
1540                                         SnapshotAccessControlException.class);
1541        }
1542      }
1543    
1544      /**
   * Move blocks from srcs to trg and delete srcs afterwards.
1546       * See {@link ClientProtocol#concat(String, String [])}. 
1547       */
1548      public void concat(String trg, String [] srcs) throws IOException {
1549        checkOpen();
1550        try {
1551          namenode.concat(trg, srcs);
1552        } catch(RemoteException re) {
1553          throw re.unwrapRemoteException(AccessControlException.class,
1554                                         UnresolvedPathException.class,
1555                                         SnapshotAccessControlException.class);
1556        }
1557      }
1558      /**
1559       * Rename file or directory.
1560       * @see ClientProtocol#rename2(String, String, Options.Rename...)
1561       */
1562      public void rename(String src, String dst, Options.Rename... options)
1563          throws IOException {
1564        checkOpen();
1565        try {
1566          namenode.rename2(src, dst, options);
1567        } catch(RemoteException re) {
1568          throw re.unwrapRemoteException(AccessControlException.class,
1569                                         DSQuotaExceededException.class,
1570                                         FileAlreadyExistsException.class,
1571                                         FileNotFoundException.class,
1572                                         ParentNotDirectoryException.class,
1573                                         SafeModeException.class,
1574                                         NSQuotaExceededException.class,
1575                                         UnresolvedPathException.class,
1576                                         SnapshotAccessControlException.class);
1577        }
1578      }
1579      /**
1580       * Delete file or directory.
1581       * See {@link ClientProtocol#delete(String, boolean)}. 
1582       */
1583      @Deprecated
1584      public boolean delete(String src) throws IOException {
1585        checkOpen();
1586        return namenode.delete(src, true);
1587      }
1588    
1589      /**
   * Delete a file or directory.
   * Deletes the contents of the directory if it is non-empty and
   * recursive is set to true.
1593       *
1594       * @see ClientProtocol#delete(String, boolean)
1595       */
1596      public boolean delete(String src, boolean recursive) throws IOException {
1597        checkOpen();
1598        try {
1599          return namenode.delete(src, recursive);
1600        } catch(RemoteException re) {
1601          throw re.unwrapRemoteException(AccessControlException.class,
1602                                         FileNotFoundException.class,
1603                                         SafeModeException.class,
1604                                         UnresolvedPathException.class,
1605                                         SnapshotAccessControlException.class);
1606        }
1607      }
1608      
1609      /** Implemented using getFileInfo(src)
1610       */
1611      public boolean exists(String src) throws IOException {
1612        checkOpen();
1613        return getFileInfo(src) != null;
1614      }
1615    
1616      /**
1617       * Get a partial listing of the indicated directory
1618       * No block locations need to be fetched
1619       */
1620      public DirectoryListing listPaths(String src,  byte[] startAfter)
1621        throws IOException {
1622        return listPaths(src, startAfter, false);
1623      }
1624      
1625      /**
1626       * Get a partial listing of the indicated directory
1627       *
   * It is recommended to use HdfsFileStatus.EMPTY_NAME as startAfter
   * if the application wants to fetch a listing starting from
   * the first entry in the directory.
1631       *
1632       * @see ClientProtocol#getListing(String, byte[], boolean)
1633       */
1634      public DirectoryListing listPaths(String src,  byte[] startAfter,
1635          boolean needLocation) 
1636        throws IOException {
1637        checkOpen();
1638        try {
1639          return namenode.getListing(src, startAfter, needLocation);
1640        } catch(RemoteException re) {
1641          throw re.unwrapRemoteException(AccessControlException.class,
1642                                         FileNotFoundException.class,
1643                                         UnresolvedPathException.class);
1644        }
1645      }
1646    
1647      /**
1648       * Get the file info for a specific file or directory.
1649       * @param src The string representation of the path to the file
1650       * @return object containing information regarding the file
1651       *         or null if file not found
1652       *         
1653       * @see ClientProtocol#getFileInfo(String) for description of exceptions
1654       */
1655      public HdfsFileStatus getFileInfo(String src) throws IOException {
1656        checkOpen();
1657        try {
1658          return namenode.getFileInfo(src);
1659        } catch(RemoteException re) {
1660          throw re.unwrapRemoteException(AccessControlException.class,
1661                                         FileNotFoundException.class,
1662                                         UnresolvedPathException.class);
1663        }
1664      }
1665      
1666      /**
   * Get the close status of a file.
1668       * @return true if file is already closed
1669       */
1670      public boolean isFileClosed(String src) throws IOException{
1671        checkOpen();
1672        try {
1673          return namenode.isFileClosed(src);
1674        } catch(RemoteException re) {
1675          throw re.unwrapRemoteException(AccessControlException.class,
1676                                         FileNotFoundException.class,
1677                                         UnresolvedPathException.class);
1678        }
1679      }
1680      
1681      /**
1682       * Get the file info for a specific file or directory. If src
1683       * refers to a symlink then the FileStatus of the link is returned.
1684       * @param src path to a file or directory.
1685       * 
1686       * For description of exceptions thrown 
1687       * @see ClientProtocol#getFileLinkInfo(String)
1688       */
1689      public HdfsFileStatus getFileLinkInfo(String src) throws IOException {
1690        checkOpen();
1691        try {
1692          return namenode.getFileLinkInfo(src);
1693        } catch(RemoteException re) {
1694          throw re.unwrapRemoteException(AccessControlException.class,
1695                                         UnresolvedPathException.class);
1696         }
1697       }
1698    
1699      /**
1700       * Get the checksum of a file.
1701       * @param src The file path
1702       * @return The checksum 
1703       * @see DistributedFileSystem#getFileChecksum(Path)
1704       */
1705      public MD5MD5CRC32FileChecksum getFileChecksum(String src) throws IOException {
1706        checkOpen();
1707        return getFileChecksum(src, clientName, namenode, socketFactory,
1708            dfsClientConf.socketTimeout, getDataEncryptionKey(),
1709            dfsClientConf.connectToDnViaHostname);
1710      }
1711      
1712      @InterfaceAudience.Private
1713      public void clearDataEncryptionKey() {
1714        LOG.debug("Clearing encryption key");
1715        synchronized (this) {
1716          encryptionKey = null;
1717        }
1718      }
1719      
1720      /**
1721       * @return true if data sent between this client and DNs should be encrypted,
1722       *         false otherwise.
1723       * @throws IOException in the event of error communicating with the NN
1724       */
1725      boolean shouldEncryptData() throws IOException {
1726        FsServerDefaults d = getServerDefaults();
1727        return d == null ? false : d.getEncryptDataTransfer();
1728      }
1729      
1730      @InterfaceAudience.Private
1731      public DataEncryptionKey getDataEncryptionKey()
1732          throws IOException {
1733        if (shouldEncryptData()) {
1734          synchronized (this) {
1735            if (encryptionKey == null ||
1736                encryptionKey.expiryDate < Time.now()) {
1737              LOG.debug("Getting new encryption token from NN");
1738              encryptionKey = namenode.getDataEncryptionKey();
1739            }
1740            return encryptionKey;
1741          }
1742        } else {
1743          return null;
1744        }
1745      }
1746    
1747      /**
1748       * Get the checksum of a file.
1749       * @param src The file path
1750       * @param clientName the name of the client requesting the checksum.
1751       * @param namenode the RPC proxy for the namenode
1752       * @param socketFactory to create sockets to connect to DNs
1753       * @param socketTimeout timeout to use when connecting and waiting for a response
1754       * @param encryptionKey the key needed to communicate with DNs in this cluster
1755       * @param connectToDnViaHostname whether the client should use hostnames instead of IPs
1756       * @return The checksum 
1757       */
1758      private static MD5MD5CRC32FileChecksum getFileChecksum(String src,
1759          String clientName,
1760          ClientProtocol namenode, SocketFactory socketFactory, int socketTimeout,
1761          DataEncryptionKey encryptionKey, boolean connectToDnViaHostname)
1762          throws IOException {
1763        //get all block locations
1764        LocatedBlocks blockLocations = callGetBlockLocations(namenode, src, 0, Long.MAX_VALUE);
1765        if (null == blockLocations) {
1766          throw new FileNotFoundException("File does not exist: " + src);
1767        }
1768        List<LocatedBlock> locatedblocks = blockLocations.getLocatedBlocks();
1769        final DataOutputBuffer md5out = new DataOutputBuffer();
1770        int bytesPerCRC = -1;
1771        DataChecksum.Type crcType = DataChecksum.Type.DEFAULT;
1772        long crcPerBlock = 0;
1773        boolean refetchBlocks = false;
1774        int lastRetriedIndex = -1;
1775    
1776        //get block checksum for each block
1777        for(int i = 0; i < locatedblocks.size(); i++) {
1778          if (refetchBlocks) {  // refetch to get fresh tokens
1779            blockLocations = callGetBlockLocations(namenode, src, 0, Long.MAX_VALUE);
1780            if (null == blockLocations) {
1781              throw new FileNotFoundException("File does not exist: " + src);
1782            }
1783            locatedblocks = blockLocations.getLocatedBlocks();
1784            refetchBlocks = false;
1785          }
1786          LocatedBlock lb = locatedblocks.get(i);
1787          final ExtendedBlock block = lb.getBlock();
1788          final DatanodeInfo[] datanodes = lb.getLocations();
1789          
1790          //try each datanode location of the block
1791          final int timeout = 3000 * datanodes.length + socketTimeout;
1792          boolean done = false;
1793          for(int j = 0; !done && j < datanodes.length; j++) {
1794            DataOutputStream out = null;
1795            DataInputStream in = null;
1796            
1797            try {
1798              //connect to a datanode
1799              IOStreamPair pair = connectToDN(socketFactory, connectToDnViaHostname,
1800                  encryptionKey, datanodes[j], timeout);
1801              out = new DataOutputStream(new BufferedOutputStream(pair.out,
1802                  HdfsConstants.SMALL_BUFFER_SIZE));
1803              in = new DataInputStream(pair.in);
1804    
1805              if (LOG.isDebugEnabled()) {
1806                LOG.debug("write to " + datanodes[j] + ": "
1807                    + Op.BLOCK_CHECKSUM + ", block=" + block);
1808              }
1809              // get block MD5
1810              new Sender(out).blockChecksum(block, lb.getBlockToken());
1811    
1812              final BlockOpResponseProto reply =
1813                BlockOpResponseProto.parseFrom(PBHelper.vintPrefixed(in));
1814    
1815              if (reply.getStatus() != Status.SUCCESS) {
1816                if (reply.getStatus() == Status.ERROR_ACCESS_TOKEN) {
1817                  throw new InvalidBlockTokenException();
1818                } else {
1819                  throw new IOException("Bad response " + reply + " for block "
1820                      + block + " from datanode " + datanodes[j]);
1821                }
1822              }
1823              
1824              OpBlockChecksumResponseProto checksumData =
1825                reply.getChecksumResponse();
1826    
1827              //read byte-per-checksum
1828              final int bpc = checksumData.getBytesPerCrc();
1829              if (i == 0) { //first block
1830                bytesPerCRC = bpc;
1831              }
1832              else if (bpc != bytesPerCRC) {
1833                throw new IOException("Byte-per-checksum not matched: bpc=" + bpc
1834                    + " but bytesPerCRC=" + bytesPerCRC);
1835              }
1836              
1837              //read crc-per-block
1838              final long cpb = checksumData.getCrcPerBlock();
1839              if (locatedblocks.size() > 1 && i == 0) {
1840                crcPerBlock = cpb;
1841              }
1842    
1843              //read md5
1844              final MD5Hash md5 = new MD5Hash(
1845                  checksumData.getMd5().toByteArray());
1846              md5.write(md5out);
1847              
1848              // read crc-type
1849              final DataChecksum.Type ct;
1850              if (checksumData.hasCrcType()) {
1851                ct = PBHelper.convert(checksumData
1852                    .getCrcType());
1853              } else {
1854                LOG.debug("Retrieving checksum from an earlier-version DataNode: " +
1855                          "inferring checksum by reading first byte");
1856                ct = inferChecksumTypeByReading(
1857                    clientName, socketFactory, socketTimeout, lb, datanodes[j],
1858                    encryptionKey, connectToDnViaHostname);
1859              }
1860    
1861              if (i == 0) { // first block
1862                crcType = ct;
1863              } else if (crcType != DataChecksum.Type.MIXED
1864                  && crcType != ct) {
1865                // if crc types are mixed in a file
1866                crcType = DataChecksum.Type.MIXED;
1867              }
1868    
1869              done = true;
1870    
1871              if (LOG.isDebugEnabled()) {
1872                if (i == 0) {
1873                  LOG.debug("set bytesPerCRC=" + bytesPerCRC
1874                      + ", crcPerBlock=" + crcPerBlock);
1875                }
1876                LOG.debug("got reply from " + datanodes[j] + ": md5=" + md5);
1877              }
1878            } catch (InvalidBlockTokenException ibte) {
1879              if (i > lastRetriedIndex) {
1880                if (LOG.isDebugEnabled()) {
1881                  LOG.debug("Got access token error in response to OP_BLOCK_CHECKSUM "
1882                      + "for file " + src + " for block " + block
1883                      + " from datanode " + datanodes[j]
1884                      + ". Will retry the block once.");
1885                }
1886                lastRetriedIndex = i;
1887                done = true; // actually it's not done; but we'll retry
1888                i--; // repeat at i-th block
1889                refetchBlocks = true;
1890                break;
1891              }
1892            } catch (IOException ie) {
1893              LOG.warn("src=" + src + ", datanodes["+j+"]=" + datanodes[j], ie);
1894            } finally {
1895              IOUtils.closeStream(in);
1896              IOUtils.closeStream(out);
1897            }
1898          }
1899    
1900          if (!done) {
1901            throw new IOException("Fail to get block MD5 for " + block);
1902          }
1903        }
1904    
1905        //compute file MD5
1906        final MD5Hash fileMD5 = MD5Hash.digest(md5out.getData()); 
1907        switch (crcType) {
1908          case CRC32:
1909            return new MD5MD5CRC32GzipFileChecksum(bytesPerCRC,
1910                crcPerBlock, fileMD5);
1911          case CRC32C:
1912            return new MD5MD5CRC32CastagnoliFileChecksum(bytesPerCRC,
1913                crcPerBlock, fileMD5);
1914          default:
1915            // If there is no block allocated for the file,
1916            // return one with the magic entry that matches what previous
1917            // hdfs versions return.
1918            if (locatedblocks.size() == 0) {
1919              return new MD5MD5CRC32GzipFileChecksum(0, 0, fileMD5);
1920            }
1921    
1922            // we should never get here since the validity was checked
1923            // when getCrcType() was called above.
1924            return null;
1925        }
1926      }
1927    
1928      /**
   * Connect to the given datanode's data transfer port, and return
1930       * the resulting IOStreamPair. This includes encryption wrapping, etc.
1931       */
1932      private static IOStreamPair connectToDN(
1933          SocketFactory socketFactory, boolean connectToDnViaHostname,
1934          DataEncryptionKey encryptionKey, DatanodeInfo dn, int timeout)
1935          throws IOException
1936      {
1937        boolean success = false;
1938        Socket sock = null;
1939        try {
1940          sock = socketFactory.createSocket();
1941          String dnAddr = dn.getXferAddr(connectToDnViaHostname);
1942          if (LOG.isDebugEnabled()) {
1943            LOG.debug("Connecting to datanode " + dnAddr);
1944          }
1945          NetUtils.connect(sock, NetUtils.createSocketAddr(dnAddr), timeout);
1946          sock.setSoTimeout(timeout);
1947      
1948          OutputStream unbufOut = NetUtils.getOutputStream(sock);
1949          InputStream unbufIn = NetUtils.getInputStream(sock);
1950          IOStreamPair ret;
1951          if (encryptionKey != null) {
1952            ret = DataTransferEncryptor.getEncryptedStreams(
1953                    unbufOut, unbufIn, encryptionKey);
1954          } else {
1955            ret = new IOStreamPair(unbufIn, unbufOut);        
1956          }
1957          success = true;
1958          return ret;
1959        } finally {
1960          if (!success) {
1961            IOUtils.closeSocket(sock);
1962          }
1963        }
1964      }
1965      
1966      /**
1967       * Infer the checksum type for a replica by sending an OP_READ_BLOCK
1968       * for the first byte of that replica. This is used for compatibility
1969       * with older HDFS versions which did not include the checksum type in
1970       * OpBlockChecksumResponseProto.
1971       *
   * @param clientName the name of the DFSClient requesting the checksum
   * @param socketFactory to create sockets to connect to DNs
   * @param socketTimeout timeout to use when connecting and waiting for a response
   * @param lb the located block
   * @param dn the datanode to read the replica from
   * @param encryptionKey the key needed to communicate with DNs in this cluster
   * @param connectToDnViaHostname whether the client should use hostnames instead of IPs
1977       * @return the inferred checksum type
1978       * @throws IOException if an error occurs
1979       */
1980      private static Type inferChecksumTypeByReading(
1981          String clientName, SocketFactory socketFactory, int socketTimeout,
1982          LocatedBlock lb, DatanodeInfo dn,
1983          DataEncryptionKey encryptionKey, boolean connectToDnViaHostname)
1984          throws IOException {
1985        IOStreamPair pair = connectToDN(socketFactory, connectToDnViaHostname,
1986            encryptionKey, dn, socketTimeout);
1987    
1988        try {
1989          DataOutputStream out = new DataOutputStream(new BufferedOutputStream(pair.out,
1990              HdfsConstants.SMALL_BUFFER_SIZE));
1991          DataInputStream in = new DataInputStream(pair.in);
1992      
1993          new Sender(out).readBlock(lb.getBlock(), lb.getBlockToken(), clientName, 0, 1, true);
1994          final BlockOpResponseProto reply =
1995              BlockOpResponseProto.parseFrom(PBHelper.vintPrefixed(in));
1996          
1997          if (reply.getStatus() != Status.SUCCESS) {
1998            if (reply.getStatus() == Status.ERROR_ACCESS_TOKEN) {
1999              throw new InvalidBlockTokenException();
2000            } else {
2001              throw new IOException("Bad response " + reply + " trying to read "
2002                  + lb.getBlock() + " from datanode " + dn);
2003            }
2004          }
2005          
2006          return PBHelper.convert(reply.getReadOpChecksumInfo().getChecksum().getType());
2007        } finally {
2008          IOUtils.cleanup(null, pair.in, pair.out);
2009        }
2010      }
2011    
2012      /**
2013       * Set permissions to a file or directory.
2014       * @param src path name.
   * @param permission the permission to set
2016       * 
2017       * @see ClientProtocol#setPermission(String, FsPermission)
2018       */
2019      public void setPermission(String src, FsPermission permission)
2020          throws IOException {
2021        checkOpen();
2022        try {
2023          namenode.setPermission(src, permission);
2024        } catch(RemoteException re) {
2025          throw re.unwrapRemoteException(AccessControlException.class,
2026                                         FileNotFoundException.class,
2027                                         SafeModeException.class,
2028                                         UnresolvedPathException.class,
2029                                         SnapshotAccessControlException.class);
2030        }
2031      }
2032    
2033      /**
2034       * Set file or directory owner.
2035       * @param src path name.
2036       * @param username user id.
2037       * @param groupname user group.
2038       * 
2039       * @see ClientProtocol#setOwner(String, String, String)
2040       */
2041      public void setOwner(String src, String username, String groupname)
2042          throws IOException {
2043        checkOpen();
2044        try {
2045          namenode.setOwner(src, username, groupname);
2046        } catch(RemoteException re) {
2047          throw re.unwrapRemoteException(AccessControlException.class,
2048                                         FileNotFoundException.class,
2049                                         SafeModeException.class,
2050                                         UnresolvedPathException.class,
2051                                         SnapshotAccessControlException.class);                                   
2052        }
2053      }
2054    
2055      /**
2056       * @see ClientProtocol#getStats()
2057       */
2058      public FsStatus getDiskStatus() throws IOException {
2059        long rawNums[] = namenode.getStats();
2060        return new FsStatus(rawNums[0], rawNums[1], rawNums[2]);
2061      }
2062    
2063      /**
2064       * Returns count of blocks with no good replicas left. Normally should be 
2065       * zero.
2066       * @throws IOException
2067       */ 
2068      public long getMissingBlocksCount() throws IOException {
2069        return namenode.getStats()[ClientProtocol.GET_STATS_MISSING_BLOCKS_IDX];
2070      }
2071      
2072      /**
   * Returns count of blocks with one or more replicas missing.
2074       * @throws IOException
2075       */ 
2076      public long getUnderReplicatedBlocksCount() throws IOException {
2077        return namenode.getStats()[ClientProtocol.GET_STATS_UNDER_REPLICATED_IDX];
2078      }
2079      
2080      /**
2081       * Returns count of blocks with at least one replica marked corrupt. 
2082       * @throws IOException
2083       */ 
2084      public long getCorruptBlocksCount() throws IOException {
2085        return namenode.getStats()[ClientProtocol.GET_STATS_CORRUPT_BLOCKS_IDX];
2086      }
2087      
2088      /**
2089       * @return a list in which each entry describes a corrupt file/block
2090       * @throws IOException
2091       */
2092      public CorruptFileBlocks listCorruptFileBlocks(String path,
2093                                                     String cookie)
2094        throws IOException {
2095        return namenode.listCorruptFileBlocks(path, cookie);
2096      }
2097    
2098      public DatanodeInfo[] datanodeReport(DatanodeReportType type)
2099      throws IOException {
2100        return namenode.getDatanodeReport(type);
2101      }
2102        
2103      /**
2104       * Enter, leave or get safe mode.
2105       * 
2106       * @see ClientProtocol#setSafeMode(HdfsConstants.SafeModeAction,boolean)
2107       */
2108      public boolean setSafeMode(SafeModeAction action) throws IOException {
2109        return setSafeMode(action, false);
2110      }
2111      
2112      /**
2113       * Enter, leave or get safe mode.
2114       * 
   * @param action
   *          One of SafeModeAction.SAFEMODE_GET, SafeModeAction.SAFEMODE_ENTER
   *          and SafeModeAction.SAFEMODE_LEAVE
2118       * @param isChecked
2119       *          If true, then check only active namenode's safemode status, else
2120       *          check first namenode's status.
2121       * @see ClientProtocol#setSafeMode(HdfsConstants.SafeModeAction, boolean)
2122       */
2123      public boolean setSafeMode(SafeModeAction action, boolean isChecked) throws IOException{
2124        return namenode.setSafeMode(action, isChecked);    
2125      }
2126     
2127      /**
2128       * Create one snapshot.
2129       * 
2130       * @param snapshotRoot The directory where the snapshot is to be taken
2131       * @param snapshotName Name of the snapshot
2132       * @return the snapshot path.
2133       * @see ClientProtocol#createSnapshot(String, String)
2134       */
2135      public String createSnapshot(String snapshotRoot, String snapshotName)
2136          throws IOException {
2137        checkOpen();
2138        try {
2139          return namenode.createSnapshot(snapshotRoot, snapshotName);
2140        } catch(RemoteException re) {
2141          throw re.unwrapRemoteException();
2142        }
2143      }
2144      
2145      /**
2146       * Delete a snapshot of a snapshottable directory.
2147       * 
2148       * @param snapshotRoot The snapshottable directory that the 
2149       *                    to-be-deleted snapshot belongs to
2150       * @param snapshotName The name of the to-be-deleted snapshot
2151       * @throws IOException
2152       * @see ClientProtocol#deleteSnapshot(String, String)
2153       */
2154      public void deleteSnapshot(String snapshotRoot, String snapshotName)
2155          throws IOException {
2156        try {
2157          namenode.deleteSnapshot(snapshotRoot, snapshotName);
2158        } catch(RemoteException re) {
2159          throw re.unwrapRemoteException();
2160        }
2161      }
2162      
2163      /**
2164       * Rename a snapshot.
2165       * @param snapshotDir The directory path where the snapshot was taken
2166       * @param snapshotOldName Old name of the snapshot
2167       * @param snapshotNewName New name of the snapshot
2168       * @throws IOException
2169       * @see ClientProtocol#renameSnapshot(String, String, String)
2170       */
2171      public void renameSnapshot(String snapshotDir, String snapshotOldName,
2172          String snapshotNewName) throws IOException {
2173        checkOpen();
2174        try {
2175          namenode.renameSnapshot(snapshotDir, snapshotOldName, snapshotNewName);
2176        } catch(RemoteException re) {
2177          throw re.unwrapRemoteException();
2178        }
2179      }
2180      
2181      /**
2182       * Get all the current snapshottable directories.
2183       * @return All the current snapshottable directories
2184       * @throws IOException
2185       * @see ClientProtocol#getSnapshottableDirListing()
2186       */
2187      public SnapshottableDirectoryStatus[] getSnapshottableDirListing()
2188          throws IOException {
2189        checkOpen();
2190        try {
2191          return namenode.getSnapshottableDirListing();
2192        } catch(RemoteException re) {
2193          throw re.unwrapRemoteException();
2194        }
2195      }
2196    
2197      /**
2198       * Allow snapshot on a directory.
2199       * 
2200       * @see ClientProtocol#allowSnapshot(String snapshotRoot)
2201       */
2202      public void allowSnapshot(String snapshotRoot) throws IOException {
2203        checkOpen();
2204        try {
2205          namenode.allowSnapshot(snapshotRoot);
2206        } catch (RemoteException re) {
2207          throw re.unwrapRemoteException();
2208        }
2209      }
2210      
2211      /**
2212       * Disallow snapshot on a directory.
2213       * 
2214       * @see ClientProtocol#disallowSnapshot(String snapshotRoot)
2215       */
2216      public void disallowSnapshot(String snapshotRoot) throws IOException {
2217        checkOpen();
2218        try {
2219          namenode.disallowSnapshot(snapshotRoot);
2220        } catch (RemoteException re) {
2221          throw re.unwrapRemoteException();
2222        }
2223      }
2224      
2225      /**
2226       * Get the difference between two snapshots, or between a snapshot and the
2227       * current tree of a directory.
2228       * @see ClientProtocol#getSnapshotDiffReport(String, String, String)
2229       */
2230      public SnapshotDiffReport getSnapshotDiffReport(String snapshotDir,
2231          String fromSnapshot, String toSnapshot) throws IOException {
2232        checkOpen();
2233        try {
2234          return namenode.getSnapshotDiffReport(snapshotDir,
2235              fromSnapshot, toSnapshot);
2236        } catch(RemoteException re) {
2237          throw re.unwrapRemoteException();
2238        }
2239      }
2240      
2241      /**
2242       * Save namespace image.
2243       * 
2244       * @see ClientProtocol#saveNamespace()
2245       */
2246      void saveNamespace() throws AccessControlException, IOException {
2247        try {
2248          namenode.saveNamespace();
2249        } catch(RemoteException re) {
2250          throw re.unwrapRemoteException(AccessControlException.class);
2251        }
2252      }
2253    
2254      /**
2255       * Rolls the edit log on the active NameNode.
2256       * @return the txid of the new log segment 
2257       *
2258       * @see ClientProtocol#rollEdits()
2259       */
2260      long rollEdits() throws AccessControlException, IOException {
2261        try {
2262          return namenode.rollEdits();
2263        } catch(RemoteException re) {
2264          throw re.unwrapRemoteException(AccessControlException.class);
2265        }
2266      }
2267      
2268      /**
2269       * enable/disable restore failed storage.
2270       * 
2271       * @see ClientProtocol#restoreFailedStorage(String arg)
2272       */
2273      boolean restoreFailedStorage(String arg)
2274          throws AccessControlException, IOException{
2275        return namenode.restoreFailedStorage(arg);
2276      }
2277    
2278      /**
2279       * Refresh the hosts and exclude files.  (Rereads them.)
2280       * See {@link ClientProtocol#refreshNodes()} 
2281       * for more details.
2282       * 
2283       * @see ClientProtocol#refreshNodes()
2284       */
2285      public void refreshNodes() throws IOException {
2286        namenode.refreshNodes();
2287      }
2288    
2289      /**
2290       * Dumps DFS data structures into specified file.
2291       * 
2292       * @see ClientProtocol#metaSave(String)
2293       */
2294      public void metaSave(String pathname) throws IOException {
2295        namenode.metaSave(pathname);
2296      }
2297    
2298      /**
2299       * Requests the namenode to tell all datanodes to use a new, non-persistent
2300       * bandwidth value for dfs.balance.bandwidthPerSec.
2301       * See {@link ClientProtocol#setBalancerBandwidth(long)} 
2302       * for more details.
2303       * 
2304       * @see ClientProtocol#setBalancerBandwidth(long)
2305       */
2306      public void setBalancerBandwidth(long bandwidth) throws IOException {
2307        namenode.setBalancerBandwidth(bandwidth);
2308      }
2309        
2310      /**
2311       * @see ClientProtocol#finalizeUpgrade()
2312       */
2313      public void finalizeUpgrade() throws IOException {
2314        namenode.finalizeUpgrade();
2315      }
2316    
2317      /**
2318       */
2319      @Deprecated
2320      public boolean mkdirs(String src) throws IOException {
2321        return mkdirs(src, null, true);
2322      }
2323    
2324      /**
2325       * Create a directory (or hierarchy of directories) with the given
2326       * name and permission.
2327       *
2328       * @param src The path of the directory being created
2329       * @param permission The permission of the directory being created.
2330       * If permission == null, use {@link FsPermission#getDefault()}.
2331       * @param createParent create missing parent directory if true
2332       * 
   * @return True if the operation succeeds.
2334       * 
2335       * @see ClientProtocol#mkdirs(String, FsPermission, boolean)
2336       */
2337      public boolean mkdirs(String src, FsPermission permission,
2338          boolean createParent) throws IOException {
2339        if (permission == null) {
2340          permission = FsPermission.getDefault();
2341        }
2342        FsPermission masked = permission.applyUMask(dfsClientConf.uMask);
2343        return primitiveMkdir(src, masked, createParent);
2344      }
2345    
2346      /**
   * Same as {@link #mkdirs(String, FsPermission, boolean)} except
   * that the permission has already been masked against the umask.
2349       */
2350      public boolean primitiveMkdir(String src, FsPermission absPermission)
2351        throws IOException {
2352        return primitiveMkdir(src, absPermission, true);
2353      }
2354    
2355      /**
   * Same as {@link #mkdirs(String, FsPermission, boolean)} except
   * that the permission has already been masked against the umask.
2358       */
2359      public boolean primitiveMkdir(String src, FsPermission absPermission, 
2360        boolean createParent)
2361        throws IOException {
2362        checkOpen();
2363        if (absPermission == null) {
2364          absPermission = 
2365            FsPermission.getDefault().applyUMask(dfsClientConf.uMask);
2366        } 
2367    
2368        if(LOG.isDebugEnabled()) {
2369          LOG.debug(src + ": masked=" + absPermission);
2370        }
2371        try {
2372          return namenode.mkdirs(src, absPermission, createParent);
2373        } catch(RemoteException re) {
2374          throw re.unwrapRemoteException(AccessControlException.class,
2375                                         InvalidPathException.class,
2376                                         FileAlreadyExistsException.class,
2377                                         FileNotFoundException.class,
2378                                         ParentNotDirectoryException.class,
2379                                         SafeModeException.class,
2380                                         NSQuotaExceededException.class,
2381                                         DSQuotaExceededException.class,
2382                                         UnresolvedPathException.class,
2383                                         SnapshotAccessControlException.class);
2384        }
2385      }
2386      
2387      /**
2388       * Get {@link ContentSummary} rooted at the specified directory.
2389       * @param path The string representation of the path
2390       * 
2391       * @see ClientProtocol#getContentSummary(String)
2392       */
2393      ContentSummary getContentSummary(String src) throws IOException {
2394        try {
2395          return namenode.getContentSummary(src);
2396        } catch(RemoteException re) {
2397          throw re.unwrapRemoteException(AccessControlException.class,
2398                                         FileNotFoundException.class,
2399                                         UnresolvedPathException.class);
2400        }
2401      }
2402    
2403      /**
2404       * Sets or resets quotas for a directory.
2405       * @see ClientProtocol#setQuota(String, long, long)
2406       */
2407      void setQuota(String src, long namespaceQuota, long diskspaceQuota) 
2408          throws IOException {
2409        // sanity check
2410        if ((namespaceQuota <= 0 && namespaceQuota != HdfsConstants.QUOTA_DONT_SET &&
2411             namespaceQuota != HdfsConstants.QUOTA_RESET) ||
2412            (diskspaceQuota <= 0 && diskspaceQuota != HdfsConstants.QUOTA_DONT_SET &&
2413             diskspaceQuota != HdfsConstants.QUOTA_RESET)) {
2414          throw new IllegalArgumentException("Invalid values for quota : " +
2415                                             namespaceQuota + " and " + 
2416                                             diskspaceQuota);
2417                                             
2418        }
2419        try {
2420          namenode.setQuota(src, namespaceQuota, diskspaceQuota);
2421        } catch(RemoteException re) {
2422          throw re.unwrapRemoteException(AccessControlException.class,
2423                                         FileNotFoundException.class,
2424                                         NSQuotaExceededException.class,
2425                                         DSQuotaExceededException.class,
2426                                         UnresolvedPathException.class,
2427                                         SnapshotAccessControlException.class);
2428        }
2429      }
2430    
2431      /**
2432       * set the modification and access time of a file
2433       * 
2434       * @see ClientProtocol#setTimes(String, long, long)
2435       */
2436      public void setTimes(String src, long mtime, long atime) throws IOException {
2437        checkOpen();
2438        try {
2439          namenode.setTimes(src, mtime, atime);
2440        } catch(RemoteException re) {
2441          throw re.unwrapRemoteException(AccessControlException.class,
2442                                         FileNotFoundException.class,
2443                                         UnresolvedPathException.class,
2444                                         SnapshotAccessControlException.class);
2445        }
2446      }
2447    
2448      /**
2449       * @deprecated use {@link HdfsDataInputStream} instead.
2450       */
2451      @Deprecated
2452      public static class DFSDataInputStream extends HdfsDataInputStream {
2453    
2454        public DFSDataInputStream(DFSInputStream in) throws IOException {
2455          super(in);
2456        }
2457      }
2458    
2459      void reportChecksumFailure(String file, ExtendedBlock blk, DatanodeInfo dn) {
2460        DatanodeInfo [] dnArr = { dn };
2461        LocatedBlock [] lblocks = { new LocatedBlock(blk, dnArr) };
2462        reportChecksumFailure(file, lblocks);
2463      }
2464        
2465      // just reports checksum failure and ignores any exception during the report.
2466      void reportChecksumFailure(String file, LocatedBlock lblocks[]) {
2467        try {
2468          reportBadBlocks(lblocks);
2469        } catch (IOException ie) {
2470          LOG.info("Found corruption while reading " + file
2471              + ". Error repairing corrupt blocks. Bad blocks remain.", ie);
2472        }
2473      }
2474    
2475      @Override
2476      public String toString() {
2477        return getClass().getSimpleName() + "[clientName=" + clientName
2478            + ", ugi=" + ugi + "]"; 
2479      }
2480    
2481      public DomainSocketFactory getDomainSocketFactory() {
2482        return domainSocketFactory;
2483      }
2484    
2485      public void disableLegacyBlockReaderLocal() {
2486        shouldUseLegacyBlockReaderLocal = false;
2487      }
2488    
2489      public boolean useLegacyBlockReaderLocal() {
2490        return shouldUseLegacyBlockReaderLocal;
2491      }
2492    }