001    /**
002     * Licensed to the Apache Software Foundation (ASF) under one
003     * or more contributor license agreements.  See the NOTICE file
004     * distributed with this work for additional information
005     * regarding copyright ownership.  The ASF licenses this file
006     * to you under the Apache License, Version 2.0 (the
007     * "License"); you may not use this file except in compliance
008     * with the License.  You may obtain a copy of the License at
009     *
010     *     http://www.apache.org/licenses/LICENSE-2.0
011     *
012     * Unless required by applicable law or agreed to in writing, software
013     * distributed under the License is distributed on an "AS IS" BASIS,
014     * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
015     * See the License for the specific language governing permissions and
016     * limitations under the License.
017     */
018    package org.apache.hadoop.hdfs;
019    
020    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT;
021    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SIZE_KEY;
022    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_DEFAULT;
023    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY;
024    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_BLOCK_WRITE_LOCATEFOLLOWINGBLOCK_RETRIES_DEFAULT;
025    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_BLOCK_WRITE_LOCATEFOLLOWINGBLOCK_RETRIES_KEY;
026    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_BLOCK_WRITE_RETRIES_DEFAULT;
027    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_BLOCK_WRITE_RETRIES_KEY;
028    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_CACHED_CONN_RETRY_DEFAULT;
029    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_CACHED_CONN_RETRY_KEY;
030    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_FAILOVER_MAX_ATTEMPTS_DEFAULT;
031    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_FAILOVER_MAX_ATTEMPTS_KEY;
032    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_FAILOVER_SLEEPTIME_BASE_DEFAULT;
033    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_FAILOVER_SLEEPTIME_BASE_KEY;
034    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_FAILOVER_SLEEPTIME_MAX_DEFAULT;
035    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_FAILOVER_SLEEPTIME_MAX_KEY;
036    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_DEFAULT;
037    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_KEY;
038    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_READ_PREFETCH_SIZE_KEY;
039    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE;
040    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_CACHE_CAPACITY_DEFAULT;
041    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_CACHE_CAPACITY_KEY;
042    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY;
043    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADER;
044    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADER_DEFAULT;
045    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT;
046    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY;
047    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY;
048    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_DEFAULT;
049    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_KEY;
050    
051    import java.io.BufferedOutputStream;
052    import java.io.DataInputStream;
053    import java.io.DataOutputStream;
054    import java.io.FileNotFoundException;
055    import java.io.IOException;
056    import java.io.OutputStream;
057    import java.net.InetAddress;
058    import java.net.InetSocketAddress;
059    import java.net.NetworkInterface;
060    import java.net.Socket;
061    import java.net.SocketAddress;
062    import java.net.SocketException;
063    import java.net.URI;
064    import java.net.UnknownHostException;
065    import java.util.ArrayList;
066    import java.util.Collections;
067    import java.util.EnumSet;
068    import java.util.HashMap;
069    import java.util.List;
070    import java.util.Map;
071    import java.util.Random;
072    
073    import javax.net.SocketFactory;
074    
075    import org.apache.commons.logging.Log;
076    import org.apache.commons.logging.LogFactory;
077    import org.apache.hadoop.classification.InterfaceAudience;
078    import org.apache.hadoop.conf.Configuration;
079    import org.apache.hadoop.fs.BlockLocation;
080    import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
081    import org.apache.hadoop.fs.ContentSummary;
082    import org.apache.hadoop.fs.CreateFlag;
083    import org.apache.hadoop.fs.FileAlreadyExistsException;
084    import org.apache.hadoop.fs.FileSystem;
085    import org.apache.hadoop.fs.FsServerDefaults;
086    import org.apache.hadoop.fs.FsStatus;
087    import org.apache.hadoop.fs.InvalidPathException;
088    import org.apache.hadoop.fs.MD5MD5CRC32FileChecksum;
089    import org.apache.hadoop.fs.Options;
090    import org.apache.hadoop.fs.ParentNotDirectoryException;
091    import org.apache.hadoop.fs.Path;
092    import org.apache.hadoop.fs.UnresolvedLinkException;
093    import org.apache.hadoop.fs.permission.FsPermission;
094    import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
095    import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
096    import org.apache.hadoop.hdfs.protocol.ClientProtocol;
097    import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
098    import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
099    import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
100    import org.apache.hadoop.hdfs.protocol.DirectoryListing;
101    import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
102    import org.apache.hadoop.hdfs.protocol.HdfsConstants;
103    import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
104    import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
105    import org.apache.hadoop.hdfs.protocol.HdfsConstants.UpgradeAction;
106    import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
107    import org.apache.hadoop.hdfs.protocol.HdfsProtoUtil;
108    import org.apache.hadoop.hdfs.protocol.LocatedBlock;
109    import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
110    import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException;
111    import org.apache.hadoop.hdfs.protocol.UnresolvedPathException;
112    import org.apache.hadoop.hdfs.protocol.datatransfer.Op;
113    import org.apache.hadoop.hdfs.protocol.datatransfer.ReplaceDatanodeOnFailure;
114    import org.apache.hadoop.hdfs.protocol.datatransfer.Sender;
115    import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto;
116    import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto;
117    import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
118    import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
119    import org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException;
120    import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
121    import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
122    import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;
123    import org.apache.hadoop.hdfs.server.namenode.NameNode;
124    import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
125    import org.apache.hadoop.io.DataOutputBuffer;
126    import org.apache.hadoop.io.EnumSetWritable;
127    import org.apache.hadoop.io.IOUtils;
128    import org.apache.hadoop.io.MD5Hash;
129    import org.apache.hadoop.io.Text;
130    import org.apache.hadoop.ipc.Client;
131    import org.apache.hadoop.ipc.RPC;
132    import org.apache.hadoop.ipc.RemoteException;
133    import org.apache.hadoop.net.DNS;
134    import org.apache.hadoop.net.NetUtils;
135    import org.apache.hadoop.security.AccessControlException;
136    import org.apache.hadoop.security.UserGroupInformation;
137    import org.apache.hadoop.security.token.SecretManager.InvalidToken;
138    import org.apache.hadoop.security.token.Token;
139    import org.apache.hadoop.security.token.TokenRenewer;
140    import org.apache.hadoop.util.DataChecksum;
141    import org.apache.hadoop.util.Progressable;
142    
143    import com.google.common.base.Joiner;
144    import com.google.common.base.Preconditions;
145    import com.google.common.net.InetAddresses;
146    
147    /********************************************************
148     * DFSClient can connect to a Hadoop Filesystem and 
149     * perform basic file tasks.  It uses the ClientProtocol
150     * to communicate with a NameNode daemon, and connects 
151     * directly to DataNodes to read/write block data.
152     *
153     * Hadoop DFS users should obtain an instance of 
154     * DistributedFileSystem, which uses DFSClient to handle
155     * filesystem tasks.
156     *
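 * A minimal usage sketch (illustrative only; it assumes a running HDFS
 * cluster and a Configuration whose fs.defaultFS points at it, and it goes
 * through the public FileSystem API rather than using DFSClient directly):
 * <pre>
 *   Configuration conf = new HdfsConfiguration();
 *   FileSystem fs = FileSystem.get(conf);  // a DistributedFileSystem for hdfs:// URIs
 *   FSDataOutputStream out = fs.create(new Path("/tmp/example.txt"));
 *   out.writeUTF("hello");
 *   out.close();
 *   fs.close();
 * </pre>
 *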
157     ********************************************************/
158    @InterfaceAudience.Private
159    public class DFSClient implements java.io.Closeable {
160      public static final Log LOG = LogFactory.getLog(DFSClient.class);
161      public static final long SERVER_DEFAULTS_VALIDITY_PERIOD = 60 * 60 * 1000L; // 1 hour
162      static final int TCP_WINDOW_SIZE = 128 * 1024; // 128 KB
163      final ClientProtocol namenode;
164      /* The service used for delegation tokens */
165      private Text dtService;
166    
167      final UserGroupInformation ugi;
168      volatile boolean clientRunning = true;
169      volatile long lastLeaseRenewal;
170      private volatile FsServerDefaults serverDefaults;
171      private volatile long serverDefaultsLastUpdate;
172      final String clientName;
173      Configuration conf;
174      SocketFactory socketFactory;
175      final ReplaceDatanodeOnFailure dtpReplaceDatanodeOnFailure;
176      final FileSystem.Statistics stats;
177      final int hdfsTimeout;    // timeout value for a DFS operation.
178      final LeaseRenewer leaserenewer;
179      final SocketCache socketCache;
180      final Conf dfsClientConf;
181      private Random r = new Random();
182      private SocketAddress[] localInterfaceAddrs;
183    
184      /**
185       * DFSClient configuration 
186       */
187      static class Conf {
188        final int maxFailoverAttempts;
189        final int failoverSleepBaseMillis;
190        final int failoverSleepMaxMillis;
191        final int maxBlockAcquireFailures;
192        final int confTime;
193        final int ioBufferSize;
194        final int checksumType;
195        final int bytesPerChecksum;
196        final int writePacketSize;
197        final int socketTimeout;
198        final int socketCacheCapacity;
199        /** Wait time window (in msec) if BlockMissingException is caught */
200        final int timeWindow;
201        final int nCachedConnRetry;
202        final int nBlockWriteRetry;
203        final int nBlockWriteLocateFollowingRetry;
204        final long defaultBlockSize;
205        final long prefetchSize;
206        final short defaultReplication;
207        final String taskId;
208        final FsPermission uMask;
209        final boolean useLegacyBlockReader;
210    
211        Conf(Configuration conf) {
212          maxFailoverAttempts = conf.getInt(
213              DFS_CLIENT_FAILOVER_MAX_ATTEMPTS_KEY,
214              DFS_CLIENT_FAILOVER_MAX_ATTEMPTS_DEFAULT);
215          failoverSleepBaseMillis = conf.getInt(
216              DFS_CLIENT_FAILOVER_SLEEPTIME_BASE_KEY,
217              DFS_CLIENT_FAILOVER_SLEEPTIME_BASE_DEFAULT);
218          failoverSleepMaxMillis = conf.getInt(
219              DFS_CLIENT_FAILOVER_SLEEPTIME_MAX_KEY,
220              DFS_CLIENT_FAILOVER_SLEEPTIME_MAX_DEFAULT);
221    
222          maxBlockAcquireFailures = conf.getInt(
223              DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_KEY,
224              DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_DEFAULT);
225          confTime = conf.getInt(DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY,
226              HdfsServerConstants.WRITE_TIMEOUT);
227          ioBufferSize = conf.getInt(
228              CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY,
229              CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT);
230          checksumType = getChecksumType(conf);
231          bytesPerChecksum = conf.getInt(DFS_BYTES_PER_CHECKSUM_KEY,
232              DFS_BYTES_PER_CHECKSUM_DEFAULT);
233          socketTimeout = conf.getInt(DFS_CLIENT_SOCKET_TIMEOUT_KEY,
234              HdfsServerConstants.READ_TIMEOUT);
235          // dfs.write.packet.size is an internal config variable
236          writePacketSize = conf.getInt(DFS_CLIENT_WRITE_PACKET_SIZE_KEY,
237              DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT);
238          defaultBlockSize = conf.getLongBytes(DFS_BLOCK_SIZE_KEY,
239              DFS_BLOCK_SIZE_DEFAULT);
240          defaultReplication = (short) conf.getInt(
241              DFS_REPLICATION_KEY, DFS_REPLICATION_DEFAULT);
242          taskId = conf.get("mapreduce.task.attempt.id", "NONMAPREDUCE");
243          socketCacheCapacity = conf.getInt(DFS_CLIENT_SOCKET_CACHE_CAPACITY_KEY,
244              DFS_CLIENT_SOCKET_CACHE_CAPACITY_DEFAULT);
245          prefetchSize = conf.getLong(DFS_CLIENT_READ_PREFETCH_SIZE_KEY,
246              10 * defaultBlockSize);
247          timeWindow = conf
248              .getInt(DFS_CLIENT_RETRY_WINDOW_BASE, 3000);
249          nCachedConnRetry = conf.getInt(DFS_CLIENT_CACHED_CONN_RETRY_KEY,
250              DFS_CLIENT_CACHED_CONN_RETRY_DEFAULT);
251          nBlockWriteRetry = conf.getInt(DFS_CLIENT_BLOCK_WRITE_RETRIES_KEY,
252              DFS_CLIENT_BLOCK_WRITE_RETRIES_DEFAULT);
253          nBlockWriteLocateFollowingRetry = conf
254              .getInt(DFS_CLIENT_BLOCK_WRITE_LOCATEFOLLOWINGBLOCK_RETRIES_KEY,
255                  DFS_CLIENT_BLOCK_WRITE_LOCATEFOLLOWINGBLOCK_RETRIES_DEFAULT);
256          uMask = FsPermission.getUMask(conf);
257          useLegacyBlockReader = conf.getBoolean(
258              DFS_CLIENT_USE_LEGACY_BLOCKREADER,
259              DFS_CLIENT_USE_LEGACY_BLOCKREADER_DEFAULT);
260        }
261    
262        private int getChecksumType(Configuration conf) {
263          String checksum = conf.get(DFSConfigKeys.DFS_CHECKSUM_TYPE_KEY,
264              DFSConfigKeys.DFS_CHECKSUM_TYPE_DEFAULT);
265          if ("CRC32".equals(checksum)) {
266            return DataChecksum.CHECKSUM_CRC32;
267          } else if ("CRC32C".equals(checksum)) {
268            return DataChecksum.CHECKSUM_CRC32C;
269          } else if ("NULL".equals(checksum)) {
270            return DataChecksum.CHECKSUM_NULL;
271          } else {
272            LOG.warn("Bad checksum type: " + checksum + ". Using default.");
273            return DataChecksum.CHECKSUM_CRC32C;
274          }
275        }
276    
277        private DataChecksum createChecksum() {
278          return DataChecksum.newDataChecksum(
279              checksumType, bytesPerChecksum);
280        }
281      }
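
  // Illustrative sketch (not part of this class's API): the fields above are read
  // from standard HDFS client keys, so callers can tune them on the Configuration
  // they pass in before constructing a DFSClient, e.g.
  //
  //   Configuration conf = new HdfsConfiguration();
  //   conf.setInt(DFS_CLIENT_SOCKET_TIMEOUT_KEY, 120 * 1000);          // 2 min read timeout
  //   conf.setLong(DFS_BLOCK_SIZE_KEY, 256L * 1024 * 1024);            // 256 MB blocks
  //   DFSClient client = new DFSClient(NameNode.getUri(nnAddr), conf); // nnAddr is hypothetical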
282     
283      Conf getConf() {
284        return dfsClientConf;
285      }
286      
287      /**
288       * A map from file names to {@link DFSOutputStream} objects
289       * that are currently being written by this client.
290       * Note that a file can only be written by a single client.
291       */
292      private final Map<String, DFSOutputStream> filesBeingWritten
293          = new HashMap<String, DFSOutputStream>();
294    
295      private boolean shortCircuitLocalReads;
296      
297      /**
298       * Same as this(NameNode.getAddress(conf), conf);
299       * @see #DFSClient(InetSocketAddress, Configuration)
300       * @deprecated Deprecated at 0.21
301       */
302      @Deprecated
303      public DFSClient(Configuration conf) throws IOException {
304        this(NameNode.getAddress(conf), conf);
305      }
306      
307      public DFSClient(InetSocketAddress address, Configuration conf) throws IOException {
308        this(NameNode.getUri(address), conf);
309      }
310    
311      /**
312       * Same as this(nameNodeUri, conf, null);
313       * @see #DFSClient(URI, Configuration, org.apache.hadoop.fs.FileSystem.Statistics)
314       */
315      public DFSClient(URI nameNodeUri, Configuration conf
316          ) throws IOException {
317        this(nameNodeUri, conf, null);
318      }
319    
320      /**
321       * Same as this(nameNodeUri, null, conf, stats);
322       * @see #DFSClient(URI, ClientProtocol, Configuration, org.apache.hadoop.fs.FileSystem.Statistics) 
323       */
324      public DFSClient(URI nameNodeUri, Configuration conf,
325                       FileSystem.Statistics stats)
326        throws IOException {
327        this(nameNodeUri, null, conf, stats);
328      }
329      
330      /** 
331       * Create a new DFSClient connected to the given nameNodeUri or rpcNamenode.
332       * Exactly one of nameNodeUri or rpcNamenode must be null.
333       */
334      DFSClient(URI nameNodeUri, ClientProtocol rpcNamenode,
335          Configuration conf, FileSystem.Statistics stats)
336        throws IOException {
337        // Copy only the required DFSClient configuration
338        this.dfsClientConf = new Conf(conf);
339        this.conf = conf;
340        this.stats = stats;
341        this.socketFactory = NetUtils.getSocketFactory(conf, ClientProtocol.class);
342        this.dtpReplaceDatanodeOnFailure = ReplaceDatanodeOnFailure.get(conf);
343    
344        // The hdfsTimeout is currently the same as the ipc timeout 
345        this.hdfsTimeout = Client.getTimeout(conf);
346        this.ugi = UserGroupInformation.getCurrentUser();
347        
348        final String authority = nameNodeUri == null? "null": nameNodeUri.getAuthority();
349        this.leaserenewer = LeaseRenewer.getInstance(authority, ugi, this);
350        this.clientName = leaserenewer.getClientName(dfsClientConf.taskId);
351        
352        this.socketCache = new SocketCache(dfsClientConf.socketCacheCapacity);
353        
354        
355        if (rpcNamenode != null) {
356          // This case is used for testing.
357          Preconditions.checkArgument(nameNodeUri == null);
358          this.namenode = rpcNamenode;
359          dtService = null;
360        } else {
361          Preconditions.checkArgument(nameNodeUri != null,
362              "null URI");
363          NameNodeProxies.ProxyAndInfo<ClientProtocol> proxyInfo =
364            NameNodeProxies.createProxy(conf, nameNodeUri, ClientProtocol.class);
365          this.dtService = proxyInfo.getDelegationTokenService();
366          this.namenode = proxyInfo.getProxy();
367        }
368    
369        // read directly from the block file if configured.
370        this.shortCircuitLocalReads = conf.getBoolean(
371            DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_KEY,
372            DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_DEFAULT);
373        if (LOG.isDebugEnabled()) {
374          LOG.debug("Short circuit read is " + shortCircuitLocalReads);
375        }
376        String localInterfaces[] =
377          conf.getTrimmedStrings(DFSConfigKeys.DFS_CLIENT_LOCAL_INTERFACES);
378        localInterfaceAddrs = getLocalInterfaceAddrs(localInterfaces);
379        if (LOG.isDebugEnabled() && 0 != localInterfaces.length) {
380          LOG.debug("Using local interfaces [" +
381          Joiner.on(',').join(localInterfaces)+ "] with addresses [" +
382          Joiner.on(',').join(localInterfaceAddrs) + "]");
383        }
384      }
385    
386      /**
387       * Return the socket addresses to use with each configured
388       * local interface. Local interfaces may be specified by IP
389       * address, IP address range using CIDR notation, interface
390       * name (e.g. eth0) or sub-interface name (e.g. eth0:0).
391       * The socket addresses consist of the IPs for the interfaces
392       * and the ephemeral port (port 0). If an IP, IP range, or
393       * name matches an interface with sub-interfaces,
394       * only the IP of the interface itself is used. Sub-interfaces can
395       * be used by specifying them explicitly (by IP or name).
396       * 
397       * @return SocketAddresses for the configured local interfaces,
398       *    or an empty array if none are configured
399       * @throws UnknownHostException if a given interface name is invalid
400       */
401      private static SocketAddress[] getLocalInterfaceAddrs(
402          String interfaceNames[]) throws UnknownHostException {
403        List<SocketAddress> localAddrs = new ArrayList<SocketAddress>();
404        for (String interfaceName : interfaceNames) {
405          if (InetAddresses.isInetAddress(interfaceName)) {
406            localAddrs.add(new InetSocketAddress(interfaceName, 0));
407          } else if (NetUtils.isValidSubnet(interfaceName)) {
408            for (InetAddress addr : NetUtils.getIPs(interfaceName, false)) {
409              localAddrs.add(new InetSocketAddress(addr, 0));
410            }
411          } else {
412            for (String ip : DNS.getIPs(interfaceName, false)) {
413              localAddrs.add(new InetSocketAddress(ip, 0));
414            }
415          }
416        }
417        return localAddrs.toArray(new SocketAddress[localAddrs.size()]);
418      }
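
  // Illustrative sketch: the interface list handled above comes from the
  // dfs.client.local.interfaces key (DFS_CLIENT_LOCAL_INTERFACES), which takes a
  // comma-separated mix of the forms described in the javadoc, e.g.
  //
  //   conf.setStrings(DFSConfigKeys.DFS_CLIENT_LOCAL_INTERFACES,
  //       "eth0", "eth0:0", "10.10.0.0/16");  // interface, sub-interface, CIDR range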
419    
420      /**
421       * Select one of the configured local interfaces at random. We use a random
422       * interface because other policies like round-robin are less effective
423       * given that we cache connections to datanodes.
424       *
425       * @return one of the local interface addresses at random, or null if no
426       *    local interfaces are configured
427       */
428      SocketAddress getRandomLocalInterfaceAddr() {
429        if (localInterfaceAddrs.length == 0) {
430          return null;
431        }
432        final int idx = r.nextInt(localInterfaceAddrs.length);
433        final SocketAddress addr = localInterfaceAddrs[idx];
434        if (LOG.isDebugEnabled()) {
435          LOG.debug("Using local interface " + addr);
436        }
437        return addr;
438      }
439    
440      /**
441       * Return the number of times the client should go back to the namenode
442       * to retrieve block locations when reading.
443       */
444      int getMaxBlockAcquireFailures() {
445        return dfsClientConf.maxBlockAcquireFailures;
446      }
447    
448      /**
449       * Return the timeout that clients should use when writing to datanodes.
450       * @param numNodes the number of nodes in the pipeline.
451       */
452      int getDatanodeWriteTimeout(int numNodes) {
453        return (dfsClientConf.confTime > 0) ?
454          (dfsClientConf.confTime + HdfsServerConstants.WRITE_TIMEOUT_EXTENSION * numNodes) : 0;
455      }
456    
457      int getDatanodeReadTimeout(int numNodes) {
458        return dfsClientConf.socketTimeout > 0 ?
459            (HdfsServerConstants.READ_TIMEOUT_EXTENSION * numNodes +
460                dfsClientConf.socketTimeout) : 0;
461      }
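
  // Worked example (assuming the historical HdfsServerConstants defaults of
  // WRITE_TIMEOUT = 8 min, READ_TIMEOUT = 60 s, and 5 s per-node extensions):
  // a 3-node pipeline gets 480,000 + 5,000 * 3 = 495,000 ms for writes and
  // 60,000 + 5,000 * 3 = 75,000 ms for reads; if the configured base value is
  // <= 0, these methods return 0 instead.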
462      
463      int getHdfsTimeout() {
464        return hdfsTimeout;
465      }
466      
467      String getClientName() {
468        return clientName;
469      }
470    
471      void checkOpen() throws IOException {
472        if (!clientRunning) {
473          throw new IOException("Filesystem closed");
475        }
476      }
477    
478      /** Add a file to the set of files currently being written by this client. */
479      void putFileBeingWritten(final String src, final DFSOutputStream out) {
480        synchronized(filesBeingWritten) {
481          filesBeingWritten.put(src, out);
482          // Update the last lease renewal time only when there were no
483          // writes. Once there is one write stream open, the lease renewer
484          // thread keeps it updated well within the lease expiration time.
485          if (lastLeaseRenewal == 0) {
486            updateLastLeaseRenewal();
487          }
488        }
489      }
490    
491      /** Remove a file from the set of files being written by this client. */
492      void removeFileBeingWritten(final String src) {
493        synchronized(filesBeingWritten) {
494          filesBeingWritten.remove(src);
495          if (filesBeingWritten.isEmpty()) {
496            lastLeaseRenewal = 0;
497          }
498        }
499      }
500    
501      /** Is file-being-written map empty? */
502      boolean isFilesBeingWrittenEmpty() {
503        synchronized(filesBeingWritten) {
504          return filesBeingWritten.isEmpty();
505        }
506      }
507      
508      /** @return true if the client is running */
509      boolean isClientRunning() {
510        return clientRunning;
511      }
512    
513      long getLastLeaseRenewal() {
514        return lastLeaseRenewal;
515      }
516    
517      void updateLastLeaseRenewal() {
518        synchronized(filesBeingWritten) {
519          if (filesBeingWritten.isEmpty()) {
520            return;
521          }
522          lastLeaseRenewal = System.currentTimeMillis();
523        }
524      }
525    
526      /**
527       * Renew the lease held by this client.
528       * @return true if the lease was renewed; false if this client has
529       * been closed or has no files open.
530       **/
531      boolean renewLease() throws IOException {
532        if (clientRunning && !isFilesBeingWrittenEmpty()) {
533          try {
534            namenode.renewLease(clientName);
535            updateLastLeaseRenewal();
536            return true;
537          } catch (IOException e) {
538            // Abort if the lease has already expired. 
539            final long elapsed = System.currentTimeMillis() - getLastLeaseRenewal();
540            if (elapsed > HdfsConstants.LEASE_SOFTLIMIT_PERIOD) {
541              LOG.warn("Failed to renew lease for " + clientName + " for "
542                  + (elapsed/1000) + " seconds (>= soft-limit ="
543                  + (HdfsConstants.LEASE_SOFTLIMIT_PERIOD/1000) + " seconds.) "
544                  + "Closing all files being written ...", e);
545              closeAllFilesBeingWritten(true);
546            } else {
547              // Let the lease renewer handle it and retry.
548              throw e;
549            }
550          }
551        }
552        return false;
553      }
554      
555      /**
556       * Close connections to the Namenode.
557       */
558      void closeConnectionToNamenode() {
559        RPC.stopProxy(namenode);
560      }
561      
562      /** Abort and release resources held.  Ignore all errors. */
563      void abort() {
564        clientRunning = false;
565        closeAllFilesBeingWritten(true);
566        socketCache.clear();
567        closeConnectionToNamenode();
568      }
569    
570      /** Close/abort all files being written. */
571      private void closeAllFilesBeingWritten(final boolean abort) {
572        for(;;) {
573          final String src;
574          final DFSOutputStream out;
575          synchronized(filesBeingWritten) {
576            if (filesBeingWritten.isEmpty()) {
577              return;
578            }
579            src = filesBeingWritten.keySet().iterator().next();
580            out = filesBeingWritten.remove(src);
581          }
582          if (out != null) {
583            try {
584              if (abort) {
585                out.abort();
586              } else {
587                out.close();
588              }
589            } catch(IOException ie) {
590              LOG.error("Failed to " + (abort? "abort": "close") + " file " + src,
591                  ie);
592            }
593          }
594        }
595      }
596    
597      /**
598       * Close the file system: close all files currently being written,
599       * stop renewing the lease, and close the connection to the namenode.
600       */
601      public synchronized void close() throws IOException {
602        if(clientRunning) {
603          closeAllFilesBeingWritten(false);
604          socketCache.clear();
605          clientRunning = false;
606          leaserenewer.closeClient(this);
607          // close connections to the namenode
608          closeConnectionToNamenode();
609        }
610      }
611    
612      /**
613       * Get the default block size for this cluster
614       * @return the default block size in bytes
615       */
616      public long getDefaultBlockSize() {
617        return dfsClientConf.defaultBlockSize;
618      }
619        
620      /**
621       * @see ClientProtocol#getPreferredBlockSize(String)
622       */
623      public long getBlockSize(String f) throws IOException {
624        try {
625          return namenode.getPreferredBlockSize(f);
626        } catch (IOException ie) {
627          LOG.warn("Problem getting block size", ie);
628          throw ie;
629        }
630      }
631    
632      /**
633       * Get server default values for a number of configuration params.
634       * @see ClientProtocol#getServerDefaults()
635       */
636      public FsServerDefaults getServerDefaults() throws IOException {
637        long now = System.currentTimeMillis();
638        if (now - serverDefaultsLastUpdate > SERVER_DEFAULTS_VALIDITY_PERIOD) {
639          serverDefaults = namenode.getServerDefaults();
640          serverDefaultsLastUpdate = now;
641        }
642        return serverDefaults;
643      }
644      
645      /**
646       * Get a canonical token service name for this client's tokens.  Null should
647       * be returned if the client is not using tokens.
648       * @return the token service for the client
649       */
650      @InterfaceAudience.LimitedPrivate( { "HDFS" }) 
651      public String getCanonicalServiceName() {
652        return (dtService != null) ? dtService.toString() : null;
653      }
654      
655      /**
656       * @see ClientProtocol#getDelegationToken(Text)
657       */
658      public Token<DelegationTokenIdentifier> getDelegationToken(Text renewer)
659          throws IOException {
660        assert dtService != null;
661        Token<DelegationTokenIdentifier> token =
662          namenode.getDelegationToken(renewer);
663        token.setService(this.dtService);
664    
665        LOG.info("Created " + DelegationTokenIdentifier.stringifyToken(token));
666        return token;
667      }
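
  // Illustrative sketch (the renewer principal "yarn" is hypothetical):
  //
  //   Token<DelegationTokenIdentifier> t =
  //       client.getDelegationToken(new Text("yarn"));
  //   // ... ship t to another process, which can later call t.renew(conf)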
668    
669      /**
670       * Renew a delegation token
671       * @param token the token to renew
672       * @return the new expiration time
673       * @throws InvalidToken
674       * @throws IOException
675       * @deprecated Use Token.renew instead.
676       */
677      public long renewDelegationToken(Token<DelegationTokenIdentifier> token)
678          throws InvalidToken, IOException {
679        LOG.info("Renewing " + DelegationTokenIdentifier.stringifyToken(token));
680        try {
681          return token.renew(conf);
682        } catch (InterruptedException ie) {                                       
683          throw new RuntimeException("caught interrupted", ie);
684        } catch (RemoteException re) {
685          throw re.unwrapRemoteException(InvalidToken.class,
686                                         AccessControlException.class);
687        }
688      }
689    
690      /**
691       * Get a {@link BlockReader} for short-circuited local reads.
692       */
693      static BlockReader getLocalBlockReader(Configuration conf,
694          String src, ExtendedBlock blk, Token<BlockTokenIdentifier> accessToken,
695          DatanodeInfo chosenNode, int socketTimeout, long offsetIntoBlock)
696          throws InvalidToken, IOException {
697        try {
698          return BlockReaderLocal.newBlockReader(conf, src, blk, accessToken,
699              chosenNode, socketTimeout, offsetIntoBlock, blk.getNumBytes()
700                  - offsetIntoBlock);
701        } catch (RemoteException re) {
702          throw re.unwrapRemoteException(InvalidToken.class,
703              AccessControlException.class);
704        }
705      }
706      
707      private static Map<String, Boolean> localAddrMap = Collections
708          .synchronizedMap(new HashMap<String, Boolean>());
709      
710      private static boolean isLocalAddress(InetSocketAddress targetAddr) {
711        InetAddress addr = targetAddr.getAddress();
712        Boolean cached = localAddrMap.get(addr.getHostAddress());
713        if (cached != null) {
714          if (LOG.isTraceEnabled()) {
715            LOG.trace("Address " + targetAddr +
716                      (cached ? " is local" : " is not local"));
717          }
718          return cached;
719        }
720    
721        // Check if the address is a wildcard (any-local) or loopback address
722        boolean local = addr.isAnyLocalAddress() || addr.isLoopbackAddress();
723    
724        // Check if the address is defined on any interface
725        if (!local) {
726          try {
727            local = NetworkInterface.getByInetAddress(addr) != null;
728          } catch (SocketException e) {
729            local = false;
730          }
731        }
732        if (LOG.isTraceEnabled()) {
733          LOG.trace("Address " + targetAddr +
734                    (local ? " is local" : " is not local"));
735        }
736        localAddrMap.put(addr.getHostAddress(), local);
737        return local;
738      }
739      
740      /**
741       * Determine whether the block access token should be refetched on an exception.
742       * 
743       * @param ex Exception received
744       * @param targetAddr Target datanode address from where exception was received
745       * @return true if the block access token has expired or is invalid and
746       *         should be refetched
747       */
748      private static boolean tokenRefetchNeeded(IOException ex,
749          InetSocketAddress targetAddr) {
750        /*
751         * Get a new access token and retry. A retry is needed in two cases:
752         * 1) both the NN and DN restarted while the DFSClient was holding a
753         * cached access token; 2) the NN failed to update its access keys at the
754         * pre-set interval (by a wide margin) and subsequently restarted. In the
755         * latter case, the DN re-registers with the NN and receives new access
756         * keys, but deletes the old access key from its memory since it is
757         * considered expired based on the estimated expiration date.
758         */
759        if (ex instanceof InvalidBlockTokenException || ex instanceof InvalidToken) {
760          LOG.info("Access token was invalid when connecting to " + targetAddr
761              + " : " + ex);
762          return true;
763        }
764        return false;
765      }
766      
767      /**
768       * Cancel a delegation token
769       * @param token the token to cancel
770       * @throws InvalidToken
771       * @throws IOException
772       * @deprecated Use Token.cancel instead.
773       */
774      public void cancelDelegationToken(Token<DelegationTokenIdentifier> token)
775          throws InvalidToken, IOException {
776        LOG.info("Cancelling " + DelegationTokenIdentifier.stringifyToken(token));
777        try {
778          token.cancel(conf);
779         } catch (InterruptedException ie) {                                       
780          throw new RuntimeException("caught interrupted", ie);
781        } catch (RemoteException re) {
782          throw re.unwrapRemoteException(InvalidToken.class,
783                                         AccessControlException.class);
784        }
785      }
786      
787      @InterfaceAudience.Private
788      public static class Renewer extends TokenRenewer {
789        
790        static {
791          // Ensure that the HDFS configuration files are loaded before trying
792          // to use the renewer.
793          HdfsConfiguration.init();
794        }
795        
796        @Override
797        public boolean handleKind(Text kind) {
798          return DelegationTokenIdentifier.HDFS_DELEGATION_KIND.equals(kind);
799        }
800    
801        @SuppressWarnings("unchecked")
802        @Override
803        public long renew(Token<?> token, Configuration conf) throws IOException {
804          Token<DelegationTokenIdentifier> delToken = 
805            (Token<DelegationTokenIdentifier>) token;
806          ClientProtocol nn = getNNProxy(delToken, conf);
807          try {
808            return nn.renewDelegationToken(delToken);
809          } catch (RemoteException re) {
810            throw re.unwrapRemoteException(InvalidToken.class, 
811                                           AccessControlException.class);
812          }
813        }
814    
815        @SuppressWarnings("unchecked")
816        @Override
817        public void cancel(Token<?> token, Configuration conf) throws IOException {
818          Token<DelegationTokenIdentifier> delToken = 
819              (Token<DelegationTokenIdentifier>) token;
820          LOG.info("Cancelling " + 
821                   DelegationTokenIdentifier.stringifyToken(delToken));
822          ClientProtocol nn = getNNProxy(delToken, conf);
823          try {
824            nn.cancelDelegationToken(delToken);
825          } catch (RemoteException re) {
826            throw re.unwrapRemoteException(InvalidToken.class,
827                AccessControlException.class);
828          }
829        }
830        
831        private static ClientProtocol getNNProxy(
832            Token<DelegationTokenIdentifier> token, Configuration conf)
833            throws IOException {
834          URI uri = HAUtil.getServiceUriFromToken(token);
835          if (HAUtil.isTokenForLogicalUri(token) &&
836              !HAUtil.isLogicalUri(conf, uri)) {
837            // If the token is for a logical nameservice, but the configuration
838            // we have disagrees about that, we can't actually renew it.
839            // This can be the case in MR, for example, if the RM doesn't
840            // have all of the HA clusters configured in its configuration.
841            throw new IOException("Unable to map logical nameservice URI '" +
842                uri + "' to a NameNode. Local configuration does not have " +
843                "a failover proxy provider configured.");
844          }
845          
846          NameNodeProxies.ProxyAndInfo<ClientProtocol> info =
847            NameNodeProxies.createProxy(conf, uri, ClientProtocol.class);
848          assert info.getDelegationTokenService().equals(token.getService()) :
849            "Returned service '" + info.getDelegationTokenService().toString() +
850            "' doesn't match expected service '" +
851            token.getService().toString() + "'";
852            
853          return info.getProxy();
854        }
855    
856        @Override
857        public boolean isManaged(Token<?> token) throws IOException {
858          return true;
859        }
860        
861      }
862    
863      /**
864       * Report corrupt blocks that were discovered by the client.
865       * @see ClientProtocol#reportBadBlocks(LocatedBlock[])
866       */
867      public void reportBadBlocks(LocatedBlock[] blocks) throws IOException {
868        namenode.reportBadBlocks(blocks);
869      }
870      
871      public short getDefaultReplication() {
872        return dfsClientConf.defaultReplication;
873      }
874    
875      /**
876       * @see ClientProtocol#getBlockLocations(String, long, long)
877       */
878      static LocatedBlocks callGetBlockLocations(ClientProtocol namenode,
879          String src, long start, long length) 
880          throws IOException {
881        try {
882          return namenode.getBlockLocations(src, start, length);
883        } catch(RemoteException re) {
884          throw re.unwrapRemoteException(AccessControlException.class,
885                                         FileNotFoundException.class,
886                                         UnresolvedPathException.class);
887        }
888      }
889    
890      /**
891       * Recover a file's lease
892       * @param src a file's path
893       * @return true if the file is already closed
894       * @throws IOException
895       */
896      boolean recoverLease(String src) throws IOException {
897        checkOpen();
898    
899        try {
900          return namenode.recoverLease(src, clientName);
901        } catch (RemoteException re) {
902          throw re.unwrapRemoteException(FileNotFoundException.class,
903                                         AccessControlException.class);
904        }
905      }
906    
907      /**
908       * Get block location info about a file.
909       * 
910       * getBlockLocations() returns a list of hostnames that store 
911       * data for a specific file region.  It returns a set of hostnames
912       * for every block within the indicated region.
913       *
914       * This function is very useful when writing code that considers
915       * data-placement when performing operations.  For example, the
916       * MapReduce system tries to schedule tasks on the same machines
917       * as the data-block the task processes. 
918       */
919      public BlockLocation[] getBlockLocations(String src, long start, 
920        long length) throws IOException, UnresolvedLinkException {
921        LocatedBlocks blocks = callGetBlockLocations(namenode, src, start, length);
922        return DFSUtil.locatedBlocks2Locations(blocks);
923      }
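
  // Illustrative sketch of using the result for placement-aware scheduling
  // (the path and length are hypothetical):
  //
  //   BlockLocation[] locs = client.getBlockLocations("/user/alice/data", 0, fileLength);
  //   for (BlockLocation loc : locs) {
  //     String[] hosts = loc.getHosts();  // datanode hostnames holding this block
  //     long offset = loc.getOffset();    // offset of the block within the file
  //   }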
924      
925      public DFSInputStream open(String src) 
926          throws IOException, UnresolvedLinkException {
927        return open(src, dfsClientConf.ioBufferSize, true, null);
928      }
929    
930      /**
931       * Create an input stream that obtains the block locations from the
932       * namenode and then reads the file data directly from the appropriate
933       * datanodes, switching to another replica when a read fails.
934       *
935       * @deprecated Use {@link #open(String, int, boolean)} instead.
936       */
937      @Deprecated
938      public DFSInputStream open(String src, int buffersize, boolean verifyChecksum,
939                                 FileSystem.Statistics stats)
940          throws IOException, UnresolvedLinkException {
941        return open(src, buffersize, verifyChecksum);
942      }
943      
944    
945      /**
946       * Create an input stream that obtains the block locations from the
947       * namenode and then reads the file data directly from the appropriate
948       * datanodes, switching to another replica when a read fails.
950       */
951      public DFSInputStream open(String src, int buffersize, boolean verifyChecksum)
952          throws IOException, UnresolvedLinkException {
953        checkOpen();
954        // Get block info from the namenode
955        return new DFSInputStream(this, src, buffersize, verifyChecksum);
956      }
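
  // Illustrative sketch (the path is hypothetical); the returned DFSInputStream
  // supports ordinary sequential and positional reads:
  //
  //   DFSInputStream in = client.open("/user/alice/data", 4096, true);
  //   byte[] buf = new byte[4096];
  //   int n = in.read(buf, 0, buf.length);
  //   in.close();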
957    
958      /**
959       * Get the namenode associated with this DFSClient object
960       * @return the namenode associated with this DFSClient object
961       */
962      public ClientProtocol getNamenode() {
963        return namenode;
964      }
965      
966      /**
967       * Call {@link #create(String, boolean, short, long, Progressable)} with
968       * default <code>replication</code> and <code>blockSize</code> and null <code>
969       * progress</code>.
970       */
971      public OutputStream create(String src, boolean overwrite) 
972          throws IOException {
973        return create(src, overwrite, dfsClientConf.defaultReplication,
974            dfsClientConf.defaultBlockSize, null);
975      }
976        
977      /**
978       * Call {@link #create(String, boolean, short, long, Progressable)} with
979       * default <code>replication</code> and <code>blockSize</code>.
980       */
981      public OutputStream create(String src, 
982                                 boolean overwrite,
983                                 Progressable progress) throws IOException {
984        return create(src, overwrite, dfsClientConf.defaultReplication,
985            dfsClientConf.defaultBlockSize, progress);
986      }
987        
988      /**
989       * Call {@link #create(String, boolean, short, long, Progressable)} with
990       * null <code>progress</code>.
991       */
992      public OutputStream create(String src, 
993                                 boolean overwrite, 
994                                 short replication,
995                                 long blockSize) throws IOException {
996        return create(src, overwrite, replication, blockSize, null);
997      }
998    
999      /**
1000       * Call {@link #create(String, boolean, short, long, Progressable, int)}
1001       * with default bufferSize.
1002       */
1003      public OutputStream create(String src, boolean overwrite, short replication,
1004          long blockSize, Progressable progress) throws IOException {
1005        return create(src, overwrite, replication, blockSize, progress,
1006            dfsClientConf.ioBufferSize);
1007      }
1008    
1009      /**
1010       * Call {@link #create(String, FsPermission, EnumSet, short, long, 
1011       * Progressable, int)} with default <code>permission</code>
1012       * {@link FsPermission#getDefault()}.
1013       * 
1014       * @param src File name
1015       * @param overwrite overwrite an existing file if true
1016       * @param replication replication factor for the file
1017       * @param blockSize maximum block size
1018       * @param progress interface for reporting client progress
1019       * @param buffersize underlying buffer size
1020       * 
1021       * @return output stream
1022       */
1023      public OutputStream create(String src,
1024                                 boolean overwrite,
1025                                 short replication,
1026                                 long blockSize,
1027                                 Progressable progress,
1028                                 int buffersize)
1029          throws IOException {
1030        return create(src, FsPermission.getDefault(),
1031            overwrite ? EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE)
1032                : EnumSet.of(CreateFlag.CREATE), replication, blockSize, progress,
1033            buffersize);
1034      }
1035    
1036      /**
1037       * Call {@link #create(String, FsPermission, EnumSet, boolean, short, 
1038       * long, Progressable, int)} with <code>createParent</code> set to true.
1039       */
1040      public DFSOutputStream create(String src, 
1041                                 FsPermission permission,
1042                                 EnumSet<CreateFlag> flag, 
1043                                 short replication,
1044                                 long blockSize,
1045                                 Progressable progress,
1046                                 int buffersize)
1047          throws IOException {
1048        return create(src, permission, flag, true,
1049            replication, blockSize, progress, buffersize);
1050      }
1051    
1052      /**
1053       * Create a new DFS file with the specified block replication and
1054       * write-progress reporting, and return an output stream for writing
1055       * into the file.
1056       * 
1057       * @param src File name
1058       * @param permission The permission of the file being created.
1059       *          If null, use default permission {@link FsPermission#getDefault()}
1060       * @param flag indicates create a new file or create/overwrite an
1061       *          existing file or append to an existing file
1062       * @param createParent create missing parent directory if true
1063       * @param replication block replication
1064       * @param blockSize maximum block size
1065       * @param progress interface for reporting client progress
1066       * @param buffersize underlying buffer size 
1067       * 
1068       * @return output stream
1069       * 
1070       * @see ClientProtocol#create(String, FsPermission, String, EnumSetWritable,
1071       * boolean, short, long) for detailed description of exceptions thrown
1072       */
1073      public DFSOutputStream create(String src, 
1074                                 FsPermission permission,
1075                                 EnumSet<CreateFlag> flag, 
1076                                 boolean createParent,
1077                                 short replication,
1078                                 long blockSize,
1079                                 Progressable progress,
1080                                 int buffersize)
1081        throws IOException {
1082        checkOpen();
1083        if (permission == null) {
1084          permission = FsPermission.getDefault();
1085        }
1086        FsPermission masked = permission.applyUMask(dfsClientConf.uMask);
1087        if(LOG.isDebugEnabled()) {
1088          LOG.debug(src + ": masked=" + masked);
1089        }
1090        final DFSOutputStream result = DFSOutputStream.newStreamForCreate(this,
1091            src, masked, flag, createParent, replication, blockSize, progress,
1092            buffersize, dfsClientConf.createChecksum());
1093        leaserenewer.put(src, result, this);
1094        return result;
1095      }
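
  // Illustrative sketch (the path and values are hypothetical): create a file
  // with explicit replication and block size, overwriting any existing file.
  //
  //   DFSOutputStream out = client.create("/user/alice/out.dat",
  //       FsPermission.getDefault(),
  //       EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE),
  //       true,                // createParent
  //       (short) 3,           // replication
  //       128L * 1024 * 1024,  // blockSize
  //       null,                // progress
  //       4096);               // buffersize
  //   out.write(data);         // data is a hypothetical byte[]
  //   out.close();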
1096      
1097      /**
1098       * Append to an existing file if {@link CreateFlag#APPEND} is present
1099       */
1100      private DFSOutputStream primitiveAppend(String src, EnumSet<CreateFlag> flag,
1101          int buffersize, Progressable progress) throws IOException {
1102        if (flag.contains(CreateFlag.APPEND)) {
1103          HdfsFileStatus stat = getFileInfo(src);
1104          if (stat == null) { // No file to append to
1105            // New file needs to be created if create option is present
1106            if (!flag.contains(CreateFlag.CREATE)) {
1107              throw new FileNotFoundException("failed to append to non-existent file "
1108                  + src + " on client " + clientName);
1109            }
1110            return null;
1111          }
1112          return callAppend(stat, src, buffersize, progress);
1113        }
1114        return null;
1115      }
1116      
1117      /**
1118       * Same as {@link #create(String, FsPermission, EnumSet, short, long,
1119       * Progressable, int)} except that the permission
1120       * is absolute (i.e. it has already been masked with the umask).
1121       */
1122      public DFSOutputStream primitiveCreate(String src, 
1123                                 FsPermission absPermission,
1124                                 EnumSet<CreateFlag> flag,
1125                                 boolean createParent,
1126                                 short replication,
1127                                 long blockSize,
1128                                 Progressable progress,
1129                                 int buffersize,
1130                                 int bytesPerChecksum)
1131          throws IOException, UnresolvedLinkException {
1132        checkOpen();
1133        CreateFlag.validate(flag);
1134        DFSOutputStream result = primitiveAppend(src, flag, buffersize, progress);
1135        if (result == null) {
1136          DataChecksum checksum = DataChecksum.newDataChecksum(
1137              dfsClientConf.checksumType,
1138              bytesPerChecksum);
1139          result = DFSOutputStream.newStreamForCreate(this, src, absPermission,
1140              flag, createParent, replication, blockSize, progress, buffersize,
1141              checksum);
1142        }
1143        leaserenewer.put(src, result, this);
1144        return result;
1145      }
1146      
1147      /**
1148       * Creates a symbolic link.
1149       * 
1150       * @see ClientProtocol#createSymlink(String, String,FsPermission, boolean) 
1151       */
1152      public void createSymlink(String target, String link, boolean createParent)
1153          throws IOException {
1154        try {
1155          FsPermission dirPerm = 
1156              FsPermission.getDefault().applyUMask(dfsClientConf.uMask); 
1157          namenode.createSymlink(target, link, dirPerm, createParent);
1158        } catch (RemoteException re) {
1159          throw re.unwrapRemoteException(AccessControlException.class,
1160                                         FileAlreadyExistsException.class, 
1161                                         FileNotFoundException.class,
1162                                         ParentNotDirectoryException.class,
1163                                         NSQuotaExceededException.class, 
1164                                         DSQuotaExceededException.class,
1165                                         UnresolvedPathException.class);
1166        }
1167      }
1168    
1169      /**
1170       * Resolve the *first* symlink, if any, in the path.
1171       * 
1172       * @see ClientProtocol#getLinkTarget(String)
1173       */
1174      public String getLinkTarget(String path) throws IOException { 
1175        checkOpen();
1176        try {
1177          return namenode.getLinkTarget(path);
1178        } catch (RemoteException re) {
1179          throw re.unwrapRemoteException(AccessControlException.class,
1180                                         FileNotFoundException.class);
1181        }
1182      }
1183    
1184      /** Call the namenode to append to a file and wrap the result in an output stream. */
1185      private DFSOutputStream callAppend(HdfsFileStatus stat, String src,
1186          int buffersize, Progressable progress) throws IOException {
1187        LocatedBlock lastBlock = null;
1188        try {
1189          lastBlock = namenode.append(src, clientName);
1190        } catch(RemoteException re) {
1191          throw re.unwrapRemoteException(AccessControlException.class,
1192                                         FileNotFoundException.class,
1193                                         SafeModeException.class,
1194                                         DSQuotaExceededException.class,
1195                                         UnsupportedOperationException.class,
1196                                         UnresolvedPathException.class);
1197        }
1198        return DFSOutputStream.newStreamForAppend(this, src, buffersize, progress,
1199            lastBlock, stat, dfsClientConf.createChecksum());
1200      }
1201      
1202      /**
1203       * Append to an existing HDFS file.  
1204       * 
1205       * @param src file name
1206       * @param buffersize buffer size
1207       * @param progress for reporting write-progress; null is acceptable.
1208       * @param statistics file system statistics; null is acceptable.
1209       * @return an output stream for writing into the file
1210       * 
1211       * @see ClientProtocol#append(String, String) 
1212       */
1213      public HdfsDataOutputStream append(final String src, final int buffersize,
1214          final Progressable progress, final FileSystem.Statistics statistics
1215          ) throws IOException {
1216        final DFSOutputStream out = append(src, buffersize, progress);
1217        return new HdfsDataOutputStream(out, statistics, out.getInitialLen());
1218      }
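
  // Illustrative sketch (the path is hypothetical): append through this public
  // wrapper, which also tracks FileSystem statistics when they are supplied.
  //
  //   HdfsDataOutputStream out = client.append("/user/alice/log", 4096, null, null);
  //   out.write(moreBytes);  // moreBytes is a hypothetical byte[]
  //   out.close();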
1219    
1220      private DFSOutputStream append(String src, int buffersize, Progressable progress) 
1221          throws IOException {
1222        checkOpen();
1223        HdfsFileStatus stat = getFileInfo(src);
1224        if (stat == null) { // No file found
1225          throw new FileNotFoundException("failed to append to non-existent file "
1226              + src + " on client " + clientName);
1227        }
1228        final DFSOutputStream result = callAppend(stat, src, buffersize, progress);
1229        leaserenewer.put(src, result, this);
1230        return result;
1231      }
1232    
1233      /**
1234       * Set replication for an existing file.
1235       * @param src file name
1236       * @param replication the new replication factor
1237       * 
1238       * @see ClientProtocol#setReplication(String, short)
1239       */
1240      public boolean setReplication(String src, short replication)
1241          throws IOException {
1242        try {
1243          return namenode.setReplication(src, replication);
1244        } catch(RemoteException re) {
1245          throw re.unwrapRemoteException(AccessControlException.class,
1246                                         FileNotFoundException.class,
1247                                         SafeModeException.class,
1248                                         DSQuotaExceededException.class,
1249                                         UnresolvedPathException.class);
1250        }
1251      }
1252    
1253      /**
1254       * Rename file or directory.
1255       * @see ClientProtocol#rename(String, String)
1256       * @deprecated Use {@link #rename(String, String, Options.Rename...)} instead.
1257       */
1258      @Deprecated
1259      public boolean rename(String src, String dst) throws IOException {
1260        checkOpen();
1261        try {
1262          return namenode.rename(src, dst);
1263        } catch(RemoteException re) {
1264          throw re.unwrapRemoteException(AccessControlException.class,
1265                                         NSQuotaExceededException.class,
1266                                         DSQuotaExceededException.class,
1267                                         UnresolvedPathException.class);
1268        }
1269      }
1270    
1271      /**
   * Move the blocks of the source files to trg and delete the source files.
1273       * See {@link ClientProtocol#concat(String, String [])}. 
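   *
   * <p>Illustrative usage sketch ({@code dfs} and the paths are assumptions
   * for the example):
   * <pre>{@code
   * // append the blocks of the part files onto the target, then delete the part files
   * dfs.concat("/user/alice/merged",
   *     new String[] {"/user/alice/part-0", "/user/alice/part-1"});
   * }</pre>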
1274       */
1275      public void concat(String trg, String [] srcs) throws IOException {
1276        checkOpen();
1277        try {
1278          namenode.concat(trg, srcs);
1279        } catch(RemoteException re) {
1280          throw re.unwrapRemoteException(AccessControlException.class,
1281                                         UnresolvedPathException.class);
1282        }
1283      }
1284      /**
1285       * Rename file or directory.
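   *
   * <p>Illustrative usage sketch ({@code dfs} and the paths are assumptions
   * for the example):
   * <pre>{@code
   * // rename, replacing the destination if it already exists
   * dfs.rename("/tmp/staging/report.csv", "/user/alice/report.csv",
   *     Options.Rename.OVERWRITE);
   * }</pre>
   *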
1286       * @see ClientProtocol#rename2(String, String, Options.Rename...)
1287       */
1288      public void rename(String src, String dst, Options.Rename... options)
1289          throws IOException {
1290        checkOpen();
1291        try {
1292          namenode.rename2(src, dst, options);
1293        } catch(RemoteException re) {
1294          throw re.unwrapRemoteException(AccessControlException.class,
1295                                         DSQuotaExceededException.class,
1296                                         FileAlreadyExistsException.class,
1297                                         FileNotFoundException.class,
1298                                         ParentNotDirectoryException.class,
1299                                         SafeModeException.class,
1300                                         NSQuotaExceededException.class,
1301                                         UnresolvedPathException.class);
1302        }
1303      }
1304      /**
   * Delete file or directory.
   * See {@link ClientProtocol#delete(String)}.
   * @deprecated Use {@link #delete(String, boolean)} instead.
1307       */
1308      @Deprecated
1309      public boolean delete(String src) throws IOException {
1310        checkOpen();
1311        return namenode.delete(src, true);
1312      }
1313    
1314      /**
   * Delete a file or directory.
   * The contents of a non-empty directory are deleted only if
   * recursive is set to true.
1318       *
1319       * @see ClientProtocol#delete(String, boolean)
1320       */
1321      public boolean delete(String src, boolean recursive) throws IOException {
1322        checkOpen();
1323        try {
1324          return namenode.delete(src, recursive);
1325        } catch(RemoteException re) {
1326          throw re.unwrapRemoteException(AccessControlException.class,
1327                                         FileNotFoundException.class,
1328                                         SafeModeException.class,
1329                                         UnresolvedPathException.class);
1330        }
1331      }
1332      
1333      /** Implemented using getFileInfo(src)
1334       */
1335      public boolean exists(String src) throws IOException {
1336        checkOpen();
1337        return getFileInfo(src) != null;
1338      }
1339    
1340      /**
   * Get a partial listing of the indicated directory.
   * No block locations need to be fetched.
1343       */
1344      public DirectoryListing listPaths(String src,  byte[] startAfter)
1345        throws IOException {
1346        return listPaths(src, startAfter, false);
1347      }
1348      
1349      /**
   * Get a partial listing of the indicated directory.
   *
   * It is recommended to use HdfsFileStatus.EMPTY_NAME as startAfter
   * if the application wants to fetch a listing starting from
   * the first entry in the directory.
1355       *
1356       * @see ClientProtocol#getListing(String, byte[], boolean)
1357       */
1358      public DirectoryListing listPaths(String src,  byte[] startAfter,
1359          boolean needLocation) 
1360        throws IOException {
1361        checkOpen();
1362        try {
1363          return namenode.getListing(src, startAfter, needLocation);
1364        } catch(RemoteException re) {
1365          throw re.unwrapRemoteException(AccessControlException.class,
1366                                         FileNotFoundException.class,
1367                                         UnresolvedPathException.class);
1368        }
1369      }
1370    
1371      /**
1372       * Get the file info for a specific file or directory.
1373       * @param src The string representation of the path to the file
1374       * @return object containing information regarding the file
1375       *         or null if file not found
1376       *         
1377       * @see ClientProtocol#getFileInfo(String) for description of exceptions
1378       */
1379      public HdfsFileStatus getFileInfo(String src) throws IOException {
1380        checkOpen();
1381        try {
1382          return namenode.getFileInfo(src);
1383        } catch(RemoteException re) {
1384          throw re.unwrapRemoteException(AccessControlException.class,
1385                                         FileNotFoundException.class,
1386                                         UnresolvedPathException.class);
1387        }
1388      }
1389    
1390      /**
1391       * Get the file info for a specific file or directory. If src
1392       * refers to a symlink then the FileStatus of the link is returned.
1393       * @param src path to a file or directory.
1394       * 
1395       * For description of exceptions thrown 
1396       * @see ClientProtocol#getFileLinkInfo(String)
1397       */
1398      public HdfsFileStatus getFileLinkInfo(String src) throws IOException {
1399        checkOpen();
1400        try {
1401          return namenode.getFileLinkInfo(src);
1402        } catch(RemoteException re) {
1403          throw re.unwrapRemoteException(AccessControlException.class,
1404                                         UnresolvedPathException.class);
1405         }
1406       }
1407    
1408      /**
1409       * Get the checksum of a file.
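   *
   * <p>Illustrative usage sketch ({@code dfs} and the path are assumptions
   * for the example):
   * <pre>{@code
   * // MD5-of-MD5s of the per-block CRC checksums
   * MD5MD5CRC32FileChecksum checksum = dfs.getFileChecksum("/user/alice/data.bin");
   * }</pre>
   *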
1410       * @param src The file path
1411       * @return The checksum 
1412       * @see DistributedFileSystem#getFileChecksum(Path)
1413       */
1414      public MD5MD5CRC32FileChecksum getFileChecksum(String src) throws IOException {
1415        checkOpen();
1416        return getFileChecksum(src, namenode, socketFactory, dfsClientConf.socketTimeout);    
1417      }
1418    
1419      /**
1420       * Get the checksum of a file.
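   *
   * <p>Outline of the approach used below: fetch all block locations, ask one
   * datanode per block for that block's MD5-of-CRCs checksum (refetching
   * locations and retrying a block once on an access-token error), and digest
   * the concatenated per-block MD5s into a single MD5-of-MD5s file checksum.
   *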
1421       * @param src The file path
1422       * @return The checksum 
1423       */
1424      public static MD5MD5CRC32FileChecksum getFileChecksum(String src,
1425          ClientProtocol namenode, SocketFactory socketFactory, int socketTimeout
1426          ) throws IOException {
1427        //get all block locations
1428        LocatedBlocks blockLocations = callGetBlockLocations(namenode, src, 0, Long.MAX_VALUE);
1429        if (null == blockLocations) {
1430          throw new FileNotFoundException("File does not exist: " + src);
1431        }
1432        List<LocatedBlock> locatedblocks = blockLocations.getLocatedBlocks();
1433        final DataOutputBuffer md5out = new DataOutputBuffer();
1434        int bytesPerCRC = 0;
1435        long crcPerBlock = 0;
1436        boolean refetchBlocks = false;
1437        int lastRetriedIndex = -1;
1438    
1439        //get block checksum for each block
1440        for(int i = 0; i < locatedblocks.size(); i++) {
1441          if (refetchBlocks) {  // refetch to get fresh tokens
1442            blockLocations = callGetBlockLocations(namenode, src, 0, Long.MAX_VALUE);
1443            if (null == blockLocations) {
1444              throw new FileNotFoundException("File does not exist: " + src);
1445            }
1446            locatedblocks = blockLocations.getLocatedBlocks();
1447            refetchBlocks = false;
1448          }
1449          LocatedBlock lb = locatedblocks.get(i);
1450          final ExtendedBlock block = lb.getBlock();
1451          final DatanodeInfo[] datanodes = lb.getLocations();
1452          
1453          //try each datanode location of the block
1454          final int timeout = 3000 * datanodes.length + socketTimeout;
1455          boolean done = false;
1456          for(int j = 0; !done && j < datanodes.length; j++) {
1457            Socket sock = null;
1458            DataOutputStream out = null;
1459            DataInputStream in = null;
1460            
1461            try {
1462              //connect to a datanode
1463              sock = socketFactory.createSocket();
1464              NetUtils.connect(sock,
1465                  NetUtils.createSocketAddr(datanodes[j].getXferAddr()),
1466                  timeout);
1467              sock.setSoTimeout(timeout);
1468    
1469              out = new DataOutputStream(
1470                  new BufferedOutputStream(NetUtils.getOutputStream(sock), 
1471                                           HdfsConstants.SMALL_BUFFER_SIZE));
1472              in = new DataInputStream(NetUtils.getInputStream(sock));
1473    
1474              if (LOG.isDebugEnabled()) {
1475                LOG.debug("write to " + datanodes[j] + ": "
1476                    + Op.BLOCK_CHECKSUM + ", block=" + block);
1477              }
1478              // get block MD5
1479              new Sender(out).blockChecksum(block, lb.getBlockToken());
1480    
1481              final BlockOpResponseProto reply =
1482                BlockOpResponseProto.parseFrom(HdfsProtoUtil.vintPrefixed(in));
1483    
1484              if (reply.getStatus() != Status.SUCCESS) {
1485                if (reply.getStatus() == Status.ERROR_ACCESS_TOKEN
1486                    && i > lastRetriedIndex) {
1487                  if (LOG.isDebugEnabled()) {
1488                    LOG.debug("Got access token error in response to OP_BLOCK_CHECKSUM "
1489                        + "for file " + src + " for block " + block
1490                        + " from datanode " + datanodes[j]
1491                        + ". Will retry the block once.");
1492                  }
1493                  lastRetriedIndex = i;
1494                  done = true; // actually it's not done; but we'll retry
1495                  i--; // repeat at i-th block
1496                  refetchBlocks = true;
1497                  break;
1498                } else {
1499                  throw new IOException("Bad response " + reply + " for block "
1500                      + block + " from datanode " + datanodes[j]);
1501                }
1502              }
1503              
1504              OpBlockChecksumResponseProto checksumData =
1505                reply.getChecksumResponse();
1506    
1507              //read byte-per-checksum
1508              final int bpc = checksumData.getBytesPerCrc();
1509              if (i == 0) { //first block
1510                bytesPerCRC = bpc;
1511              }
1512              else if (bpc != bytesPerCRC) {
1513                throw new IOException("Byte-per-checksum not matched: bpc=" + bpc
1514                    + " but bytesPerCRC=" + bytesPerCRC);
1515              }
1516              
1517              //read crc-per-block
1518              final long cpb = checksumData.getCrcPerBlock();
1519              if (locatedblocks.size() > 1 && i == 0) {
1520                crcPerBlock = cpb;
1521              }
1522    
1523              //read md5
1524              final MD5Hash md5 = new MD5Hash(
1525                  checksumData.getMd5().toByteArray());
1526              md5.write(md5out);
1527              
1528              done = true;
1529    
1530              if (LOG.isDebugEnabled()) {
1531                if (i == 0) {
1532                  LOG.debug("set bytesPerCRC=" + bytesPerCRC
1533                      + ", crcPerBlock=" + crcPerBlock);
1534                }
1535                LOG.debug("got reply from " + datanodes[j] + ": md5=" + md5);
1536              }
1537            } catch (IOException ie) {
1538              LOG.warn("src=" + src + ", datanodes["+j+"]=" + datanodes[j], ie);
1539            } finally {
1540              IOUtils.closeStream(in);
1541              IOUtils.closeStream(out);
1542              IOUtils.closeSocket(sock);        
1543            }
1544          }
1545    
1546          if (!done) {
1547            throw new IOException("Fail to get block MD5 for " + block);
1548          }
1549        }
1550    
1551        //compute file MD5
1552        final MD5Hash fileMD5 = MD5Hash.digest(md5out.getData()); 
1553        return new MD5MD5CRC32FileChecksum(bytesPerCRC, crcPerBlock, fileMD5);
1554      }
1555    
1556      /**
1557       * Set permissions to a file or directory.
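   *
   * <p>Illustrative usage sketch ({@code dfs} and the path are assumptions
   * for the example):
   * <pre>{@code
   * // rw-r--r--
   * dfs.setPermission("/user/alice/report.csv", new FsPermission((short) 0644));
   * }</pre>
   *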
1558       * @param src path name.
   * @param permission the permission to set
1560       * 
1561       * @see ClientProtocol#setPermission(String, FsPermission)
1562       */
1563      public void setPermission(String src, FsPermission permission)
1564          throws IOException {
1565        checkOpen();
1566        try {
1567          namenode.setPermission(src, permission);
1568        } catch(RemoteException re) {
1569          throw re.unwrapRemoteException(AccessControlException.class,
1570                                         FileNotFoundException.class,
1571                                         SafeModeException.class,
1572                                         UnresolvedPathException.class);
1573        }
1574      }
1575    
1576      /**
1577       * Set file or directory owner.
1578       * @param src path name.
1579       * @param username user id.
1580       * @param groupname user group.
1581       * 
1582       * @see ClientProtocol#setOwner(String, String, String)
1583       */
1584      public void setOwner(String src, String username, String groupname)
1585          throws IOException {
1586        checkOpen();
1587        try {
1588          namenode.setOwner(src, username, groupname);
1589        } catch(RemoteException re) {
1590          throw re.unwrapRemoteException(AccessControlException.class,
1591                                         FileNotFoundException.class,
1592                                         SafeModeException.class,
1593                                         UnresolvedPathException.class);                                   
1594        }
1595      }
1596    
1597      /**
1598       * @see ClientProtocol#getStats()
1599       */
1600      public FsStatus getDiskStatus() throws IOException {
1601        long rawNums[] = namenode.getStats();
1602        return new FsStatus(rawNums[0], rawNums[1], rawNums[2]);
1603      }
1604    
1605      /**
1606       * Returns count of blocks with no good replicas left. Normally should be 
1607       * zero.
1608       * @throws IOException
1609       */ 
1610      public long getMissingBlocksCount() throws IOException {
1611        return namenode.getStats()[ClientProtocol.GET_STATS_MISSING_BLOCKS_IDX];
1612      }
1613      
1614      /**
   * Returns count of blocks with one or more replicas missing.
1616       * @throws IOException
1617       */ 
1618      public long getUnderReplicatedBlocksCount() throws IOException {
1619        return namenode.getStats()[ClientProtocol.GET_STATS_UNDER_REPLICATED_IDX];
1620      }
1621      
1622      /**
1623       * Returns count of blocks with at least one replica marked corrupt. 
1624       * @throws IOException
1625       */ 
1626      public long getCorruptBlocksCount() throws IOException {
1627        return namenode.getStats()[ClientProtocol.GET_STATS_CORRUPT_BLOCKS_IDX];
1628      }
1629      
1630      /**
1631       * @return a list in which each entry describes a corrupt file/block
1632       * @throws IOException
1633       */
1634      public CorruptFileBlocks listCorruptFileBlocks(String path,
1635                                                     String cookie)
1636        throws IOException {
1637        return namenode.listCorruptFileBlocks(path, cookie);
1638      }
1639    
1640      public DatanodeInfo[] datanodeReport(DatanodeReportType type)
1641      throws IOException {
1642        return namenode.getDatanodeReport(type);
1643      }
1644        
1645      /**
1646       * Enter, leave or get safe mode.
1647       * 
1648       * @see ClientProtocol#setSafeMode(HdfsConstants.SafeModeAction)
1649       */
1650      public boolean setSafeMode(SafeModeAction action) throws IOException {
1651        return namenode.setSafeMode(action);
1652      }
1653    
1654      /**
1655       * Save namespace image.
1656       * 
1657       * @see ClientProtocol#saveNamespace()
1658       */
1659      void saveNamespace() throws AccessControlException, IOException {
1660        try {
1661          namenode.saveNamespace();
1662        } catch(RemoteException re) {
1663          throw re.unwrapRemoteException(AccessControlException.class);
1664        }
1665      }
1666      
1667      /**
   * Enable/disable restore of failed storage.
1669       * 
1670       * @see ClientProtocol#restoreFailedStorage(String arg)
1671       */
1672      boolean restoreFailedStorage(String arg)
1673          throws AccessControlException, IOException{
1674        return namenode.restoreFailedStorage(arg);
1675      }
1676    
1677      /**
1678       * Refresh the hosts and exclude files.  (Rereads them.)
1679       * See {@link ClientProtocol#refreshNodes()} 
1680       * for more details.
1681       * 
1682       * @see ClientProtocol#refreshNodes()
1683       */
1684      public void refreshNodes() throws IOException {
1685        namenode.refreshNodes();
1686      }
1687    
1688      /**
1689       * Dumps DFS data structures into specified file.
1690       * 
1691       * @see ClientProtocol#metaSave(String)
1692       */
1693      public void metaSave(String pathname) throws IOException {
1694        namenode.metaSave(pathname);
1695      }
1696    
1697      /**
1698       * Requests the namenode to tell all datanodes to use a new, non-persistent
1699       * bandwidth value for dfs.balance.bandwidthPerSec.
1700       * See {@link ClientProtocol#setBalancerBandwidth(long)} 
1701       * for more details.
1702       * 
1703       * @see ClientProtocol#setBalancerBandwidth(long)
1704       */
1705      public void setBalancerBandwidth(long bandwidth) throws IOException {
1706        namenode.setBalancerBandwidth(bandwidth);
1707      }
1708        
1709      /**
1710       * @see ClientProtocol#finalizeUpgrade()
1711       */
1712      public void finalizeUpgrade() throws IOException {
1713        namenode.finalizeUpgrade();
1714      }
1715    
1716      /**
1717       * @see ClientProtocol#distributedUpgradeProgress(HdfsConstants.UpgradeAction)
1718       */
1719      public UpgradeStatusReport distributedUpgradeProgress(UpgradeAction action)
1720          throws IOException {
1721        return namenode.distributedUpgradeProgress(action);
1722      }
1723    
1724      /**
1725       */
1726      @Deprecated
1727      public boolean mkdirs(String src) throws IOException {
1728        return mkdirs(src, null, true);
1729      }
1730    
1731      /**
1732       * Create a directory (or hierarchy of directories) with the given
1733       * name and permission.
1734       *
1735       * @param src The path of the directory being created
1736       * @param permission The permission of the directory being created.
1737       * If permission == null, use {@link FsPermission#getDefault()}.
1738       * @param createParent create missing parent directory if true
1739       * 
   * @return true if the operation succeeds.
1741       * 
1742       * @see ClientProtocol#mkdirs(String, FsPermission, boolean)
1743       */
1744      public boolean mkdirs(String src, FsPermission permission,
1745          boolean createParent) throws IOException {
1746        checkOpen();
1747        if (permission == null) {
1748          permission = FsPermission.getDefault();
1749        }
1750        FsPermission masked = permission.applyUMask(dfsClientConf.uMask);
1751        if(LOG.isDebugEnabled()) {
1752          LOG.debug(src + ": masked=" + masked);
1753        }
1754        try {
1755          return namenode.mkdirs(src, masked, createParent);
1756        } catch(RemoteException re) {
1757          throw re.unwrapRemoteException(AccessControlException.class,
1758                                         InvalidPathException.class,
1759                                         FileAlreadyExistsException.class,
1760                                         FileNotFoundException.class,
1761                                         ParentNotDirectoryException.class,
1762                                         SafeModeException.class,
1763                                         NSQuotaExceededException.class,
1764                                         UnresolvedPathException.class);
1765        }
1766      }
1767      
1768      /**
   * Same as {@link #mkdirs(String, FsPermission, boolean)} except
   * that the permission has already been masked against the umask.
1771       */
1772      public boolean primitiveMkdir(String src, FsPermission absPermission)
1773        throws IOException {
1774        checkOpen();
1775        if (absPermission == null) {
1776          absPermission = 
1777            FsPermission.getDefault().applyUMask(dfsClientConf.uMask);
1778        } 
1779    
1780        if(LOG.isDebugEnabled()) {
1781          LOG.debug(src + ": masked=" + absPermission);
1782        }
1783        try {
1784          return namenode.mkdirs(src, absPermission, true);
1785        } catch(RemoteException re) {
1786          throw re.unwrapRemoteException(AccessControlException.class,
1787                                         NSQuotaExceededException.class,
1788                                         DSQuotaExceededException.class,
1789                                         UnresolvedPathException.class);
1790        }
1791      }
1792    
1793      /**
1794       * Get {@link ContentSummary} rooted at the specified directory.
   * @param src The string representation of the path
1796       * 
1797       * @see ClientProtocol#getContentSummary(String)
1798       */
1799      ContentSummary getContentSummary(String src) throws IOException {
1800        try {
1801          return namenode.getContentSummary(src);
1802        } catch(RemoteException re) {
1803          throw re.unwrapRemoteException(AccessControlException.class,
1804                                         FileNotFoundException.class,
1805                                         UnresolvedPathException.class);
1806        }
1807      }
1808    
1809      /**
1810       * Sets or resets quotas for a directory.
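   *
   * <p>Illustrative usage sketch ({@code dfs} and the path are assumptions
   * for the example):
   * <pre>{@code
   * // limit the directory to 100000 names, leave the diskspace quota unchanged
   * dfs.setQuota("/user/alice", 100000, HdfsConstants.QUOTA_DONT_SET);
   * // later, remove the namespace quota again
   * dfs.setQuota("/user/alice", HdfsConstants.QUOTA_RESET, HdfsConstants.QUOTA_DONT_SET);
   * }</pre>
   *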
1811       * @see ClientProtocol#setQuota(String, long, long)
1812       */
1813      void setQuota(String src, long namespaceQuota, long diskspaceQuota) 
1814          throws IOException {
1815        // sanity check
1816        if ((namespaceQuota <= 0 && namespaceQuota != HdfsConstants.QUOTA_DONT_SET &&
1817             namespaceQuota != HdfsConstants.QUOTA_RESET) ||
1818            (diskspaceQuota <= 0 && diskspaceQuota != HdfsConstants.QUOTA_DONT_SET &&
1819             diskspaceQuota != HdfsConstants.QUOTA_RESET)) {
1820          throw new IllegalArgumentException("Invalid values for quota : " +
1821                                             namespaceQuota + " and " + 
1822                                             diskspaceQuota);
1823                                             
1824        }
1825        try {
1826          namenode.setQuota(src, namespaceQuota, diskspaceQuota);
1827        } catch(RemoteException re) {
1828          throw re.unwrapRemoteException(AccessControlException.class,
1829                                         FileNotFoundException.class,
1830                                         NSQuotaExceededException.class,
1831                                         DSQuotaExceededException.class,
1832                                         UnresolvedPathException.class);
1833        }
1834      }
1835    
1836      /**
   * Set the modification and access time of a file.
1838       * 
1839       * @see ClientProtocol#setTimes(String, long, long)
1840       */
1841      public void setTimes(String src, long mtime, long atime) throws IOException {
1842        checkOpen();
1843        try {
1844          namenode.setTimes(src, mtime, atime);
1845        } catch(RemoteException re) {
1846          throw re.unwrapRemoteException(AccessControlException.class,
1847                                         FileNotFoundException.class,
1848                                         UnresolvedPathException.class);
1849        }
1850      }
1851    
1852      /**
1853       * @deprecated use {@link HdfsDataInputStream} instead.
1854       */
1855      @Deprecated
1856      public static class DFSDataInputStream extends HdfsDataInputStream {
1857    
1858        public DFSDataInputStream(DFSInputStream in) throws IOException {
1859          super(in);
1860        }
1861      }
1862      
  boolean shouldTryShortCircuitRead(InetSocketAddress targetAddr) {
    return shortCircuitLocalReads && isLocalAddress(targetAddr);
  }
1869    
1870      void reportChecksumFailure(String file, ExtendedBlock blk, DatanodeInfo dn) {
1871        DatanodeInfo [] dnArr = { dn };
1872        LocatedBlock [] lblocks = { new LocatedBlock(blk, dnArr) };
1873        reportChecksumFailure(file, lblocks);
1874      }
1875        
1876      // just reports checksum failure and ignores any exception during the report.
1877      void reportChecksumFailure(String file, LocatedBlock lblocks[]) {
1878        try {
1879          reportBadBlocks(lblocks);
1880        } catch (IOException ie) {
1881          LOG.info("Found corruption while reading " + file
1882              + ".  Error repairing corrupt blocks.  Bad blocks remain.", ie);
1883        }
1884      }
1885    
1886      @Override
1887      public String toString() {
1888        return getClass().getSimpleName() + "[clientName=" + clientName
1889            + ", ugi=" + ugi + "]"; 
1890      }
1891    
1892      void disableShortCircuit() {
1893        shortCircuitLocalReads = false;
1894      }
1895    }