001    /**
002     * Licensed to the Apache Software Foundation (ASF) under one
003     * or more contributor license agreements.  See the NOTICE file
004     * distributed with this work for additional information
005     * regarding copyright ownership.  The ASF licenses this file
006     * to you under the Apache License, Version 2.0 (the
007     * "License"); you may not use this file except in compliance
008     * with the License.  You may obtain a copy of the License at
009     *
010     *     http://www.apache.org/licenses/LICENSE-2.0
011     *
012     * Unless required by applicable law or agreed to in writing, software
013     * distributed under the License is distributed on an "AS IS" BASIS,
014     * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
015     * See the License for the specific language governing permissions and
016     * limitations under the License.
017     */
018    package org.apache.hadoop.hdfs;
019    
020    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT;
021    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SIZE_KEY;
022    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_DEFAULT;
023    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY;
024    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_BLOCK_WRITE_LOCATEFOLLOWINGBLOCK_RETRIES_DEFAULT;
025    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_BLOCK_WRITE_LOCATEFOLLOWINGBLOCK_RETRIES_KEY;
026    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_BLOCK_WRITE_RETRIES_DEFAULT;
027    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_BLOCK_WRITE_RETRIES_KEY;
028    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_CACHED_CONN_RETRY_DEFAULT;
029    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_CACHED_CONN_RETRY_KEY;
030    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_FAILOVER_MAX_ATTEMPTS_DEFAULT;
031    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_FAILOVER_MAX_ATTEMPTS_KEY;
032    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_FAILOVER_SLEEPTIME_BASE_DEFAULT;
033    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_FAILOVER_SLEEPTIME_BASE_KEY;
034    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_FAILOVER_SLEEPTIME_MAX_DEFAULT;
035    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_FAILOVER_SLEEPTIME_MAX_KEY;
036    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_DEFAULT;
037    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_KEY;
038    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_READ_PREFETCH_SIZE_KEY;
039    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE;
040    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_CACHE_CAPACITY_DEFAULT;
041    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_CACHE_CAPACITY_KEY;
042    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY;
043    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADER;
044    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADER_DEFAULT;
045    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT;
046    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY;
047    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY;
048    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_DEFAULT;
049    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_KEY;
050    
051    import java.io.BufferedOutputStream;
052    import java.io.DataInputStream;
053    import java.io.DataOutputStream;
054    import java.io.FileNotFoundException;
055    import java.io.IOException;
056    import java.io.OutputStream;
057    import java.net.InetAddress;
058    import java.net.InetSocketAddress;
059    import java.net.NetworkInterface;
060    import java.net.Socket;
061    import java.net.SocketException;
062    import java.net.SocketAddress;
063    import java.net.URI;
064    import java.net.UnknownHostException;
065    import java.util.ArrayList;
066    import java.util.Collections;
067    import java.util.EnumSet;
068    import java.util.HashMap;
069    import java.util.List;
070    import java.util.Map;
071    import java.util.Random;
072    
073    import javax.net.SocketFactory;
074    
075    import org.apache.commons.logging.Log;
076    import org.apache.commons.logging.LogFactory;
077    import org.apache.hadoop.classification.InterfaceAudience;
078    import org.apache.hadoop.conf.Configuration;
079    import org.apache.hadoop.fs.BlockLocation;
080    import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
081    import org.apache.hadoop.fs.ContentSummary;
082    import org.apache.hadoop.fs.CreateFlag;
083    import org.apache.hadoop.fs.FileAlreadyExistsException;
084    import org.apache.hadoop.fs.FileSystem;
085    import org.apache.hadoop.fs.FsServerDefaults;
086    import org.apache.hadoop.fs.FsStatus;
087    import org.apache.hadoop.fs.InvalidPathException;
088    import org.apache.hadoop.fs.MD5MD5CRC32FileChecksum;
089    import org.apache.hadoop.fs.Options;
090    import org.apache.hadoop.fs.ParentNotDirectoryException;
091    import org.apache.hadoop.fs.Path;
092    import org.apache.hadoop.fs.UnresolvedLinkException;
093    import org.apache.hadoop.fs.permission.FsPermission;
094    import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
095    import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
096    import org.apache.hadoop.hdfs.protocol.ClientProtocol;
097    import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
098    import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
099    import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
100    import org.apache.hadoop.hdfs.protocol.DirectoryListing;
101    import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
102    import org.apache.hadoop.hdfs.protocol.HdfsConstants;
103    import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
104    import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
105    import org.apache.hadoop.hdfs.protocol.HdfsConstants.UpgradeAction;
106    import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
107    import org.apache.hadoop.hdfs.protocol.HdfsProtoUtil;
108    import org.apache.hadoop.hdfs.protocol.LocatedBlock;
109    import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
110    import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException;
111    import org.apache.hadoop.hdfs.protocol.UnresolvedPathException;
112    import org.apache.hadoop.hdfs.protocol.datatransfer.Op;
113    import org.apache.hadoop.hdfs.protocol.datatransfer.ReplaceDatanodeOnFailure;
114    import org.apache.hadoop.hdfs.protocol.datatransfer.Sender;
115    import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto;
116    import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto;
117    import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
118    import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
119    import org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException;
120    import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
121    import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
122    import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;
123    import org.apache.hadoop.hdfs.server.namenode.NameNode;
124    import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
125    import org.apache.hadoop.io.DataOutputBuffer;
126    import org.apache.hadoop.io.EnumSetWritable;
127    import org.apache.hadoop.io.IOUtils;
128    import org.apache.hadoop.io.MD5Hash;
129    import org.apache.hadoop.io.Text;
130    import org.apache.hadoop.ipc.Client;
131    import org.apache.hadoop.ipc.RPC;
132    import org.apache.hadoop.ipc.RemoteException;
133    import org.apache.hadoop.net.DNS;
134    import org.apache.hadoop.net.NetUtils;
135    import org.apache.hadoop.security.AccessControlException;
136    import org.apache.hadoop.security.UserGroupInformation;
137    import org.apache.hadoop.security.token.SecretManager.InvalidToken;
138    import org.apache.hadoop.security.token.Token;
139    import org.apache.hadoop.security.token.TokenRenewer;
140    import org.apache.hadoop.util.DataChecksum;
141    import org.apache.hadoop.util.Progressable;
142    
143    import com.google.common.annotations.VisibleForTesting;
144    import com.google.common.base.Joiner;
145    import com.google.common.base.Preconditions;
146    import com.google.common.net.InetAddresses;
147    
148    /********************************************************
149     * DFSClient can connect to a Hadoop Filesystem and 
150     * perform basic file tasks.  It uses the ClientProtocol
151     * to communicate with a NameNode daemon, and connects 
152     * directly to DataNodes to read/write block data.
153     *
154     * Hadoop DFS users should obtain an instance of 
155     * DistributedFileSystem, which uses DFSClient to handle
156     * filesystem tasks.
157     *
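 * A minimal usage sketch (illustrative only; it assumes fs.defaultFS in the
 * loaded configuration points at an HDFS cluster, and the path is made up):
 * <pre>
 *   Configuration conf = new HdfsConfiguration();
 *   FileSystem fs = FileSystem.get(conf);  // a DistributedFileSystem for hdfs:// URIs
 *   FSDataOutputStream out = fs.create(new Path("/tmp/example.txt"));
 *   out.writeBytes("hello");
 *   out.close();
 * </pre>
 *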
158     ********************************************************/
159    @InterfaceAudience.Private
160    public class DFSClient implements java.io.Closeable {
161      public static final Log LOG = LogFactory.getLog(DFSClient.class);
162      public static final long SERVER_DEFAULTS_VALIDITY_PERIOD = 60 * 60 * 1000L; // 1 hour
163      static final int TCP_WINDOW_SIZE = 128 * 1024; // 128 KB
164      final ClientProtocol namenode;
165      /* The service used for delegation tokens */
166      private Text dtService;
167    
168      final UserGroupInformation ugi;
169      volatile boolean clientRunning = true;
170      volatile long lastLeaseRenewal;
171      private volatile FsServerDefaults serverDefaults;
172      private volatile long serverDefaultsLastUpdate;
173      final String clientName;
174      Configuration conf;
175      SocketFactory socketFactory;
176      final ReplaceDatanodeOnFailure dtpReplaceDatanodeOnFailure;
177      final FileSystem.Statistics stats;
178      final int hdfsTimeout;    // timeout value for a DFS operation.
179      final LeaseRenewer leaserenewer;
180      final SocketCache socketCache;
181      final Conf dfsClientConf;
182      private Random r = new Random();
183      private SocketAddress[] localInterfaceAddrs;
184    
185      /**
186       * DFSClient configuration 
187       */
188      static class Conf {
189        final int maxFailoverAttempts;
190        final int failoverSleepBaseMillis;
191        final int failoverSleepMaxMillis;
192        final int maxBlockAcquireFailures;
193        final int confTime;
194        final int ioBufferSize;
195        final int checksumType;
196        final int bytesPerChecksum;
197        final int writePacketSize;
198        final int socketTimeout;
199        final int socketCacheCapacity;
200        /** Wait time window (in msec) if BlockMissingException is caught */
201        final int timeWindow;
202        final int nCachedConnRetry;
203        final int nBlockWriteRetry;
204        final int nBlockWriteLocateFollowingRetry;
205        final long defaultBlockSize;
206        final long prefetchSize;
207        final short defaultReplication;
208        final String taskId;
209        final FsPermission uMask;
210        final boolean useLegacyBlockReader;
211    
212        Conf(Configuration conf) {
213          maxFailoverAttempts = conf.getInt(
214              DFS_CLIENT_FAILOVER_MAX_ATTEMPTS_KEY,
215              DFS_CLIENT_FAILOVER_MAX_ATTEMPTS_DEFAULT);
216          failoverSleepBaseMillis = conf.getInt(
217              DFS_CLIENT_FAILOVER_SLEEPTIME_BASE_KEY,
218              DFS_CLIENT_FAILOVER_SLEEPTIME_BASE_DEFAULT);
219          failoverSleepMaxMillis = conf.getInt(
220              DFS_CLIENT_FAILOVER_SLEEPTIME_MAX_KEY,
221              DFS_CLIENT_FAILOVER_SLEEPTIME_MAX_DEFAULT);
222    
223          maxBlockAcquireFailures = conf.getInt(
224              DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_KEY,
225              DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_DEFAULT);
226          confTime = conf.getInt(DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY,
227              HdfsServerConstants.WRITE_TIMEOUT);
228          ioBufferSize = conf.getInt(
229              CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY,
230              CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT);
231          checksumType = getChecksumType(conf);
232          bytesPerChecksum = conf.getInt(DFS_BYTES_PER_CHECKSUM_KEY,
233              DFS_BYTES_PER_CHECKSUM_DEFAULT);
234          socketTimeout = conf.getInt(DFS_CLIENT_SOCKET_TIMEOUT_KEY,
235              HdfsServerConstants.READ_TIMEOUT);
236          /** dfs.write.packet.size is an internal config variable */
237          writePacketSize = conf.getInt(DFS_CLIENT_WRITE_PACKET_SIZE_KEY,
238              DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT);
239          defaultBlockSize = conf.getLongBytes(DFS_BLOCK_SIZE_KEY,
240              DFS_BLOCK_SIZE_DEFAULT);
241          defaultReplication = (short) conf.getInt(
242              DFS_REPLICATION_KEY, DFS_REPLICATION_DEFAULT);
243          taskId = conf.get("mapreduce.task.attempt.id", "NONMAPREDUCE");
244          socketCacheCapacity = conf.getInt(DFS_CLIENT_SOCKET_CACHE_CAPACITY_KEY,
245              DFS_CLIENT_SOCKET_CACHE_CAPACITY_DEFAULT);
246          prefetchSize = conf.getLong(DFS_CLIENT_READ_PREFETCH_SIZE_KEY,
247              10 * defaultBlockSize);
248          timeWindow = conf
249              .getInt(DFS_CLIENT_RETRY_WINDOW_BASE, 3000);
250          nCachedConnRetry = conf.getInt(DFS_CLIENT_CACHED_CONN_RETRY_KEY,
251              DFS_CLIENT_CACHED_CONN_RETRY_DEFAULT);
252          nBlockWriteRetry = conf.getInt(DFS_CLIENT_BLOCK_WRITE_RETRIES_KEY,
253              DFS_CLIENT_BLOCK_WRITE_RETRIES_DEFAULT);
254          nBlockWriteLocateFollowingRetry = conf
255              .getInt(DFS_CLIENT_BLOCK_WRITE_LOCATEFOLLOWINGBLOCK_RETRIES_KEY,
256                  DFS_CLIENT_BLOCK_WRITE_LOCATEFOLLOWINGBLOCK_RETRIES_DEFAULT);
257          uMask = FsPermission.getUMask(conf);
258          useLegacyBlockReader = conf.getBoolean(
259              DFS_CLIENT_USE_LEGACY_BLOCKREADER,
260              DFS_CLIENT_USE_LEGACY_BLOCKREADER_DEFAULT);
261        }
262    
263        private int getChecksumType(Configuration conf) {
264          String checksum = conf.get(DFSConfigKeys.DFS_CHECKSUM_TYPE_KEY,
265              DFSConfigKeys.DFS_CHECKSUM_TYPE_DEFAULT);
266          if ("CRC32".equals(checksum)) {
267            return DataChecksum.CHECKSUM_CRC32;
268          } else if ("CRC32C".equals(checksum)) {
269            return DataChecksum.CHECKSUM_CRC32C;
270          } else if ("NULL".equals(checksum)) {
271            return DataChecksum.CHECKSUM_NULL;
272          } else {
273            LOG.warn("Bad checksum type: " + checksum + ". Using default.");
274            return DataChecksum.CHECKSUM_CRC32C;
275          }
276        }
277    
278        private DataChecksum createChecksum() {
279          return DataChecksum.newDataChecksum(
280              checksumType, bytesPerChecksum);
281        }
282      }
283     
284      Conf getConf() {
285        return dfsClientConf;
286      }
287      
288      /**
289       * A map from file names to {@link DFSOutputStream} objects
290       * that are currently being written by this client.
291       * Note that a file can only be written by a single client.
292       */
293      private final Map<String, DFSOutputStream> filesBeingWritten
294          = new HashMap<String, DFSOutputStream>();
295    
296      private boolean shortCircuitLocalReads;
297      
298      /**
299       * Same as this(NameNode.getAddress(conf), conf);
300       * @see #DFSClient(InetSocketAddress, Configuration)
301       * @deprecated Deprecated at 0.21
302       */
303      @Deprecated
304      public DFSClient(Configuration conf) throws IOException {
305        this(NameNode.getAddress(conf), conf);
306      }
307      
308      public DFSClient(InetSocketAddress address, Configuration conf) throws IOException {
309        this(NameNode.getUri(address), conf);
310      }
311    
312      /**
313       * Same as this(nameNodeUri, conf, null);
   * @see #DFSClient(URI, Configuration, org.apache.hadoop.fs.FileSystem.Statistics)
315       */
316      public DFSClient(URI nameNodeUri, Configuration conf
317          ) throws IOException {
318        this(nameNodeUri, conf, null);
319      }
320    
321      /**
322       * Same as this(nameNodeUri, null, conf, stats);
   * @see #DFSClient(URI, ClientProtocol, Configuration, org.apache.hadoop.fs.FileSystem.Statistics) 
324       */
325      public DFSClient(URI nameNodeUri, Configuration conf,
326                       FileSystem.Statistics stats)
327        throws IOException {
328        this(nameNodeUri, null, conf, stats);
329      }
330      
331      /** 
332       * Create a new DFSClient connected to the given nameNodeUri or rpcNamenode.
333       * Exactly one of nameNodeUri or rpcNamenode must be null.
334       */
335      DFSClient(URI nameNodeUri, ClientProtocol rpcNamenode,
336          Configuration conf, FileSystem.Statistics stats)
337        throws IOException {
338        // Copy only the required DFSClient configuration
339        this.dfsClientConf = new Conf(conf);
340        this.conf = conf;
341        this.stats = stats;
342        this.socketFactory = NetUtils.getSocketFactory(conf, ClientProtocol.class);
343        this.dtpReplaceDatanodeOnFailure = ReplaceDatanodeOnFailure.get(conf);
344    
345        // The hdfsTimeout is currently the same as the ipc timeout 
346        this.hdfsTimeout = Client.getTimeout(conf);
347        this.ugi = UserGroupInformation.getCurrentUser();
348        
349        final String authority = nameNodeUri == null? "null": nameNodeUri.getAuthority();
350        this.leaserenewer = LeaseRenewer.getInstance(authority, ugi, this);
351        this.clientName = leaserenewer.getClientName(dfsClientConf.taskId);
352        
353        this.socketCache = new SocketCache(dfsClientConf.socketCacheCapacity);
354        
355        
356        if (rpcNamenode != null) {
357          // This case is used for testing.
358          Preconditions.checkArgument(nameNodeUri == null);
359          this.namenode = rpcNamenode;
360          dtService = null;
361        } else {
362          Preconditions.checkArgument(nameNodeUri != null,
363              "null URI");
364          NameNodeProxies.ProxyAndInfo<ClientProtocol> proxyInfo =
365            NameNodeProxies.createProxy(conf, nameNodeUri, ClientProtocol.class);
366          this.dtService = proxyInfo.getDelegationTokenService();
367          this.namenode = proxyInfo.getProxy();
368        }
369    
370        // read directly from the block file if configured.
371        this.shortCircuitLocalReads = conf.getBoolean(
372            DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_KEY,
373            DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_DEFAULT);
374        if (LOG.isDebugEnabled()) {
375          LOG.debug("Short circuit read is " + shortCircuitLocalReads);
376        }
377        String localInterfaces[] =
378          conf.getTrimmedStrings(DFSConfigKeys.DFS_CLIENT_LOCAL_INTERFACES);
379        localInterfaceAddrs = getLocalInterfaceAddrs(localInterfaces);
380        if (LOG.isDebugEnabled() && 0 != localInterfaces.length) {
381          LOG.debug("Using local interfaces [" +
382          Joiner.on(',').join(localInterfaces)+ "] with addresses [" +
383          Joiner.on(',').join(localInterfaceAddrs) + "]");
384        }
385      }
386    
387      /**
388       * Return the socket addresses to use with each configured
389       * local interface. Local interfaces may be specified by IP
390       * address, IP address range using CIDR notation, interface
391       * name (e.g. eth0) or sub-interface name (e.g. eth0:0).
392       * The socket addresses consist of the IPs for the interfaces
393       * and the ephemeral port (port 0). If an IP, IP range, or
394       * interface name matches an interface with sub-interfaces
395       * only the IP of the interface is used. Sub-interfaces can
396       * be used by specifying them explicitly (by IP or name).
397       * 
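   * A hedged configuration sketch (the interface names and addresses below
   * are purely illustrative):
   * <pre>
   *   conf.setStrings(DFSConfigKeys.DFS_CLIENT_LOCAL_INTERFACES,
   *       "eth0", "eth0:0", "10.0.0.5", "10.0.0.0/24");
   * </pre>
   *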
398       * @return SocketAddresses for the configured local interfaces,
399       *    or an empty array if none are configured
400       * @throws UnknownHostException if a given interface name is invalid
401       */
402      private static SocketAddress[] getLocalInterfaceAddrs(
403          String interfaceNames[]) throws UnknownHostException {
404        List<SocketAddress> localAddrs = new ArrayList<SocketAddress>();
405        for (String interfaceName : interfaceNames) {
406          if (InetAddresses.isInetAddress(interfaceName)) {
407            localAddrs.add(new InetSocketAddress(interfaceName, 0));
408          } else if (NetUtils.isValidSubnet(interfaceName)) {
409            for (InetAddress addr : NetUtils.getIPs(interfaceName, false)) {
410              localAddrs.add(new InetSocketAddress(addr, 0));
411            }
412          } else {
413            for (String ip : DNS.getIPs(interfaceName, false)) {
414              localAddrs.add(new InetSocketAddress(ip, 0));
415            }
416          }
417        }
418        return localAddrs.toArray(new SocketAddress[localAddrs.size()]);
419      }
420    
421      /**
422       * Select one of the configured local interfaces at random. We use a random
423       * interface because other policies like round-robin are less effective
424       * given that we cache connections to datanodes.
425       *
426       * @return one of the local interface addresses at random, or null if no
427       *    local interfaces are configured
428       */
429      SocketAddress getRandomLocalInterfaceAddr() {
430        if (localInterfaceAddrs.length == 0) {
431          return null;
432        }
433        final int idx = r.nextInt(localInterfaceAddrs.length);
434        final SocketAddress addr = localInterfaceAddrs[idx];
435        if (LOG.isDebugEnabled()) {
436          LOG.debug("Using local interface " + addr);
437        }
438        return addr;
439      }
440    
441      /**
442       * Return the number of times the client should go back to the namenode
443       * to retrieve block locations when reading.
444       */
445      int getMaxBlockAcquireFailures() {
446        return dfsClientConf.maxBlockAcquireFailures;
447      }
448    
449      /**
450       * Return the timeout that clients should use when writing to datanodes.
451       * @param numNodes the number of nodes in the pipeline.
452       */
453      int getDatanodeWriteTimeout(int numNodes) {
454        return (dfsClientConf.confTime > 0) ?
455          (dfsClientConf.confTime + HdfsServerConstants.WRITE_TIMEOUT_EXTENSION * numNodes) : 0;
456      }
457    
458      int getDatanodeReadTimeout(int numNodes) {
459        return dfsClientConf.socketTimeout > 0 ?
460            (HdfsServerConstants.READ_TIMEOUT_EXTENSION * numNodes +
461                dfsClientConf.socketTimeout) : 0;
462      }
463      
464      int getHdfsTimeout() {
465        return hdfsTimeout;
466      }
467      
468      String getClientName() {
469        return clientName;
470      }
471    
  void checkOpen() throws IOException {
    if (!clientRunning) {
      throw new IOException("Filesystem closed");
    }
  }
478    
  /** Record a file that is being written by this client. */
480      void putFileBeingWritten(final String src, final DFSOutputStream out) {
481        synchronized(filesBeingWritten) {
482          filesBeingWritten.put(src, out);
      // Update the last lease renewal time only when there were no
      // writes. Once there is one write stream open, the lease renewer
      // thread keeps it updated well within anyone's expiration time.
486          if (lastLeaseRenewal == 0) {
487            updateLastLeaseRenewal();
488          }
489        }
490      }
491    
  /** Remove a file from the set of files being written by this client. */
493      void removeFileBeingWritten(final String src) {
494        synchronized(filesBeingWritten) {
495          filesBeingWritten.remove(src);
496          if (filesBeingWritten.isEmpty()) {
497            lastLeaseRenewal = 0;
498          }
499        }
500      }
501    
502      /** Is file-being-written map empty? */
503      boolean isFilesBeingWrittenEmpty() {
504        synchronized(filesBeingWritten) {
505          return filesBeingWritten.isEmpty();
506        }
507      }
508      
509      /** @return true if the client is running */
510      boolean isClientRunning() {
511        return clientRunning;
512      }
513    
514      long getLastLeaseRenewal() {
515        return lastLeaseRenewal;
516      }
517    
518      void updateLastLeaseRenewal() {
519        synchronized(filesBeingWritten) {
520          if (filesBeingWritten.isEmpty()) {
521            return;
522          }
523          lastLeaseRenewal = System.currentTimeMillis();
524        }
525      }
526    
527      /**
528       * Renew leases.
529       * @return true if lease was renewed. May return false if this
530       * client has been closed or has no files open.
531       **/
532      boolean renewLease() throws IOException {
533        if (clientRunning && !isFilesBeingWrittenEmpty()) {
534          try {
535            namenode.renewLease(clientName);
536            updateLastLeaseRenewal();
537            return true;
538          } catch (IOException e) {
539            // Abort if the lease has already expired. 
540            final long elapsed = System.currentTimeMillis() - getLastLeaseRenewal();
541            if (elapsed > HdfsConstants.LEASE_SOFTLIMIT_PERIOD) {
542              LOG.warn("Failed to renew lease for " + clientName + " for "
543                  + (elapsed/1000) + " seconds (>= soft-limit ="
544                  + (HdfsConstants.LEASE_SOFTLIMIT_PERIOD/1000) + " seconds.) "
545                  + "Closing all files being written ...", e);
546              closeAllFilesBeingWritten(true);
547            } else {
548              // Let the lease renewer handle it and retry.
549              throw e;
550            }
551          }
552        }
553        return false;
554      }
555      
556      /**
   * Close connections to the Namenode.
558       */
559      void closeConnectionToNamenode() {
560        RPC.stopProxy(namenode);
561      }
562      
563      /** Abort and release resources held.  Ignore all errors. */
564      void abort() {
565        clientRunning = false;
566        closeAllFilesBeingWritten(true);
567        socketCache.clear();
568        closeConnectionToNamenode();
569      }
570    
571      /** Close/abort all files being written. */
572      private void closeAllFilesBeingWritten(final boolean abort) {
573        for(;;) {
574          final String src;
575          final DFSOutputStream out;
576          synchronized(filesBeingWritten) {
577            if (filesBeingWritten.isEmpty()) {
578              return;
579            }
580            src = filesBeingWritten.keySet().iterator().next();
581            out = filesBeingWritten.remove(src);
582          }
583          if (out != null) {
584            try {
585              if (abort) {
586                out.abort();
587              } else {
588                out.close();
589              }
590            } catch(IOException ie) {
591              LOG.error("Failed to " + (abort? "abort": "close") + " file " + src,
592                  ie);
593            }
594          }
595        }
596      }
597    
598      /**
599       * Close the file system, abandoning all of the leases and files being
   * created, and close connections to the namenode.
601       */
602      public synchronized void close() throws IOException {
603        if(clientRunning) {
604          closeAllFilesBeingWritten(false);
605          socketCache.clear();
606          clientRunning = false;
607          leaserenewer.closeClient(this);
608          // close connections to the namenode
609          closeConnectionToNamenode();
610        }
611      }
612    
613      /**
614       * Get the default block size for this cluster
615       * @return the default block size in bytes
616       */
617      public long getDefaultBlockSize() {
618        return dfsClientConf.defaultBlockSize;
619      }
620        
621      /**
622       * @see ClientProtocol#getPreferredBlockSize(String)
623       */
624      public long getBlockSize(String f) throws IOException {
625        try {
626          return namenode.getPreferredBlockSize(f);
627        } catch (IOException ie) {
628          LOG.warn("Problem getting block size", ie);
629          throw ie;
630        }
631      }
632    
633      /**
634       * Get server default values for a number of configuration params.
635       * @see ClientProtocol#getServerDefaults()
636       */
637      public FsServerDefaults getServerDefaults() throws IOException {
638        long now = System.currentTimeMillis();
639        if (now - serverDefaultsLastUpdate > SERVER_DEFAULTS_VALIDITY_PERIOD) {
640          serverDefaults = namenode.getServerDefaults();
641          serverDefaultsLastUpdate = now;
642        }
643        return serverDefaults;
644      }
645      
646      /**
647       * Get a canonical token service name for this client's tokens.  Null should
648       * be returned if the client is not using tokens.
649       * @return the token service for the client
650       */
651      @InterfaceAudience.LimitedPrivate( { "HDFS" }) 
652      public String getCanonicalServiceName() {
653        return (dtService != null) ? dtService.toString() : null;
654      }
655      
656      /**
657       * @see ClientProtocol#getDelegationToken(Text)
658       */
659      public Token<DelegationTokenIdentifier> getDelegationToken(Text renewer)
660          throws IOException {
661        assert dtService != null;
662        Token<DelegationTokenIdentifier> token =
663          namenode.getDelegationToken(renewer);
664        token.setService(this.dtService);
665    
666        LOG.info("Created " + DelegationTokenIdentifier.stringifyToken(token));
667        return token;
668      }
669    
670      /**
671       * Renew a delegation token
672       * @param token the token to renew
673       * @return the new expiration time
674       * @throws InvalidToken
675       * @throws IOException
676       * @deprecated Use Token.renew instead.
677       */
678      public long renewDelegationToken(Token<DelegationTokenIdentifier> token)
679          throws InvalidToken, IOException {
680        LOG.info("Renewing " + DelegationTokenIdentifier.stringifyToken(token));
681        try {
682          return token.renew(conf);
683        } catch (InterruptedException ie) {                                       
684          throw new RuntimeException("caught interrupted", ie);
685        } catch (RemoteException re) {
686          throw re.unwrapRemoteException(InvalidToken.class,
687                                         AccessControlException.class);
688        }
689      }
690    
691      /**
   * Get a {@link BlockReader} for short-circuited local reads.
693       */
694      static BlockReader getLocalBlockReader(Configuration conf,
695          String src, ExtendedBlock blk, Token<BlockTokenIdentifier> accessToken,
696          DatanodeInfo chosenNode, int socketTimeout, long offsetIntoBlock)
697          throws InvalidToken, IOException {
698        try {
699          return BlockReaderLocal.newBlockReader(conf, src, blk, accessToken,
700              chosenNode, socketTimeout, offsetIntoBlock, blk.getNumBytes()
701                  - offsetIntoBlock);
702        } catch (RemoteException re) {
703          throw re.unwrapRemoteException(InvalidToken.class,
704              AccessControlException.class);
705        }
706      }
707      
708      private static Map<String, Boolean> localAddrMap = Collections
709          .synchronizedMap(new HashMap<String, Boolean>());
710      
711      private static boolean isLocalAddress(InetSocketAddress targetAddr) {
712        InetAddress addr = targetAddr.getAddress();
713        Boolean cached = localAddrMap.get(addr.getHostAddress());
714        if (cached != null) {
715          if (LOG.isTraceEnabled()) {
716            LOG.trace("Address " + targetAddr +
717                      (cached ? " is local" : " is not local"));
718          }
719          return cached;
720        }
721    
722        // Check if the address is any local or loop back
723        boolean local = addr.isAnyLocalAddress() || addr.isLoopbackAddress();
724    
725        // Check if the address is defined on any interface
726        if (!local) {
727          try {
728            local = NetworkInterface.getByInetAddress(addr) != null;
729          } catch (SocketException e) {
730            local = false;
731          }
732        }
733        if (LOG.isTraceEnabled()) {
734          LOG.trace("Address " + targetAddr +
735                    (local ? " is local" : " is not local"));
736        }
737        localAddrMap.put(addr.getHostAddress(), local);
738        return local;
739      }
740      
741      /**
   * Should the block access token be refetched on an exception?
   * 
   * @param ex Exception received
   * @param targetAddr Target datanode address from where the exception was received
   * @return true if the block access token has expired or is invalid and it should
   *         be refetched
748       */
749      private static boolean tokenRefetchNeeded(IOException ex,
750          InetSocketAddress targetAddr) {
751        /*
     * Get a new access token and retry. Retry is needed in two cases. 1) When
     * both the NN and DN restarted while the DFSClient was holding a cached access
     * token. 2) When the NN fails to update its access key at the pre-set interval
     * (by a wide margin) and subsequently restarts. In this case, the DN
     * re-registers itself with the NN and receives a new access key, but the DN will
757         * delete the old access key from its memory since it's considered expired
758         * based on the estimated expiration date.
759         */
760        if (ex instanceof InvalidBlockTokenException || ex instanceof InvalidToken) {
761          LOG.info("Access token was invalid when connecting to " + targetAddr
762              + " : " + ex);
763          return true;
764        }
765        return false;
766      }
767      
768      /**
769       * Cancel a delegation token
770       * @param token the token to cancel
771       * @throws InvalidToken
772       * @throws IOException
773       * @deprecated Use Token.cancel instead.
774       */
775      public void cancelDelegationToken(Token<DelegationTokenIdentifier> token)
776          throws InvalidToken, IOException {
777        LOG.info("Cancelling " + DelegationTokenIdentifier.stringifyToken(token));
778        try {
779          token.cancel(conf);
780         } catch (InterruptedException ie) {                                       
781          throw new RuntimeException("caught interrupted", ie);
782        } catch (RemoteException re) {
783          throw re.unwrapRemoteException(InvalidToken.class,
784                                         AccessControlException.class);
785        }
786      }
787      
788      @InterfaceAudience.Private
789      public static class Renewer extends TokenRenewer {
790        
791        static {
      // Ensure that HDFS Configuration files are loaded before trying to use
      // the renewer.
794          HdfsConfiguration.init();
795        }
796        
797        @Override
798        public boolean handleKind(Text kind) {
799          return DelegationTokenIdentifier.HDFS_DELEGATION_KIND.equals(kind);
800        }
801    
802        @SuppressWarnings("unchecked")
803        @Override
804        public long renew(Token<?> token, Configuration conf) throws IOException {
805          Token<DelegationTokenIdentifier> delToken = 
806            (Token<DelegationTokenIdentifier>) token;
807          ClientProtocol nn = getNNProxy(delToken, conf);
808          try {
809            return nn.renewDelegationToken(delToken);
810          } catch (RemoteException re) {
811            throw re.unwrapRemoteException(InvalidToken.class, 
812                                           AccessControlException.class);
813          }
814        }
815    
816        @SuppressWarnings("unchecked")
817        @Override
818        public void cancel(Token<?> token, Configuration conf) throws IOException {
819          Token<DelegationTokenIdentifier> delToken = 
820              (Token<DelegationTokenIdentifier>) token;
821          LOG.info("Cancelling " + 
822                   DelegationTokenIdentifier.stringifyToken(delToken));
823          ClientProtocol nn = getNNProxy(delToken, conf);
824          try {
825            nn.cancelDelegationToken(delToken);
826          } catch (RemoteException re) {
827            throw re.unwrapRemoteException(InvalidToken.class,
828                AccessControlException.class);
829          }
830        }
831        
832        private static ClientProtocol getNNProxy(
833            Token<DelegationTokenIdentifier> token, Configuration conf)
834            throws IOException {
835          URI uri = HAUtil.getServiceUriFromToken(token);
836          if (HAUtil.isTokenForLogicalUri(token) &&
837              !HAUtil.isLogicalUri(conf, uri)) {
838            // If the token is for a logical nameservice, but the configuration
839            // we have disagrees about that, we can't actually renew it.
840            // This can be the case in MR, for example, if the RM doesn't
841            // have all of the HA clusters configured in its configuration.
842            throw new IOException("Unable to map logical nameservice URI '" +
843                uri + "' to a NameNode. Local configuration does not have " +
844                "a failover proxy provider configured.");
845          }
846          
847          NameNodeProxies.ProxyAndInfo<ClientProtocol> info =
848            NameNodeProxies.createProxy(conf, uri, ClientProtocol.class);
849          assert info.getDelegationTokenService().equals(token.getService()) :
850            "Returned service '" + info.getDelegationTokenService().toString() +
851            "' doesn't match expected service '" +
852            token.getService().toString() + "'";
853            
854          return info.getProxy();
855        }
856    
857        @Override
858        public boolean isManaged(Token<?> token) throws IOException {
859          return true;
860        }
861        
862      }
863    
864      /**
865       * Report corrupt blocks that were discovered by the client.
866       * @see ClientProtocol#reportBadBlocks(LocatedBlock[])
867       */
868      public void reportBadBlocks(LocatedBlock[] blocks) throws IOException {
869        namenode.reportBadBlocks(blocks);
870      }
871      
872      public short getDefaultReplication() {
873        return dfsClientConf.defaultReplication;
874      }
875      
876      /*
877       * This is just a wrapper around callGetBlockLocations, but non-static so that
878       * we can stub it out for tests.
879       */
880      @VisibleForTesting
881      public LocatedBlocks getLocatedBlocks(String src, long start, long length)
882          throws IOException {
883        return callGetBlockLocations(namenode, src, start, length);
884      }
885    
886      /**
887       * @see ClientProtocol#getBlockLocations(String, long, long)
888       */
889      static LocatedBlocks callGetBlockLocations(ClientProtocol namenode,
890          String src, long start, long length) 
891          throws IOException {
892        try {
893          return namenode.getBlockLocations(src, start, length);
894        } catch(RemoteException re) {
895          throw re.unwrapRemoteException(AccessControlException.class,
896                                         FileNotFoundException.class,
897                                         UnresolvedPathException.class);
898        }
899      }
900    
901      /**
902       * Recover a file's lease
903       * @param src a file's path
904       * @return true if the file is already closed
905       * @throws IOException
906       */
907      boolean recoverLease(String src) throws IOException {
908        checkOpen();
909    
910        try {
911          return namenode.recoverLease(src, clientName);
912        } catch (RemoteException re) {
913          throw re.unwrapRemoteException(FileNotFoundException.class,
914                                         AccessControlException.class);
915        }
916      }
917    
918      /**
   * Get block location info about a file.
920       * 
921       * getBlockLocations() returns a list of hostnames that store 
922       * data for a specific file region.  It returns a set of hostnames
923       * for every block within the indicated region.
924       *
925       * This function is very useful when writing code that considers
926       * data-placement when performing operations.  For example, the
927       * MapReduce system tries to schedule tasks on the same machines
   * as the data blocks the tasks process.
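   *
   * A brief usage sketch (the path is illustrative and dfsClient stands for
   * an already constructed DFSClient):
   * <pre>
   *   BlockLocation[] locs =
   *       dfsClient.getBlockLocations("/tmp/example.txt", 0, Long.MAX_VALUE);
   *   for (BlockLocation loc : locs) {
   *     // hosts storing replicas of one block in the requested range
   *     String[] hosts = loc.getHosts();
   *   }
   * </pre>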
929       */
930      public BlockLocation[] getBlockLocations(String src, long start, 
931        long length) throws IOException, UnresolvedLinkException {
932        LocatedBlocks blocks = getLocatedBlocks(src, start, length);
933        return DFSUtil.locatedBlocks2Locations(blocks);
934      }
935      
936      public DFSInputStream open(String src) 
937          throws IOException, UnresolvedLinkException {
938        return open(src, dfsClientConf.ioBufferSize, true, null);
939      }
940    
941      /**
942       * Create an input stream that obtains a nodelist from the
   * namenode, and then reads from all the right places.  Creates an
   * inner subclass of InputStream that does the right out-of-band
945       * work.
946       * @deprecated Use {@link #open(String, int, boolean)} instead.
947       */
948      @Deprecated
949      public DFSInputStream open(String src, int buffersize, boolean verifyChecksum,
950                                 FileSystem.Statistics stats)
951          throws IOException, UnresolvedLinkException {
952        return open(src, buffersize, verifyChecksum);
953      }
954      
955    
956      /**
957       * Create an input stream that obtains a nodelist from the
   * namenode, and then reads from all the right places.  Creates an
   * inner subclass of InputStream that does the right out-of-band
960       * work.
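   *
   * A short usage sketch (the path is illustrative and dfsClient is an
   * already constructed DFSClient):
   * <pre>
   *   DFSInputStream in = dfsClient.open("/tmp/example.txt");
   *   byte[] buf = new byte[4096];
   *   int n = in.read(buf, 0, buf.length);
   *   in.close();
   * </pre>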
961       */
962      public DFSInputStream open(String src, int buffersize, boolean verifyChecksum)
963          throws IOException, UnresolvedLinkException {
964        checkOpen();
965        //    Get block info from namenode
966        return new DFSInputStream(this, src, buffersize, verifyChecksum);
967      }
968    
969      /**
970       * Get the namenode associated with this DFSClient object
971       * @return the namenode associated with this DFSClient object
972       */
973      public ClientProtocol getNamenode() {
974        return namenode;
975      }
976      
977      /**
978       * Call {@link #create(String, boolean, short, long, Progressable)} with
   * default <code>replication</code> and <code>blockSize</code> and null
   * <code>progress</code>.
981       */
982      public OutputStream create(String src, boolean overwrite) 
983          throws IOException {
984        return create(src, overwrite, dfsClientConf.defaultReplication,
985            dfsClientConf.defaultBlockSize, null);
986      }
987        
988      /**
989       * Call {@link #create(String, boolean, short, long, Progressable)} with
   * default <code>replication</code> and <code>blockSize</code>.
991       */
992      public OutputStream create(String src, 
993                                 boolean overwrite,
994                                 Progressable progress) throws IOException {
995        return create(src, overwrite, dfsClientConf.defaultReplication,
996            dfsClientConf.defaultBlockSize, progress);
997      }
998        
999      /**
1000       * Call {@link #create(String, boolean, short, long, Progressable)} with
1001       * null <code>progress</code>.
1002       */
1003      public OutputStream create(String src, 
1004                                 boolean overwrite, 
1005                                 short replication,
1006                                 long blockSize) throws IOException {
1007        return create(src, overwrite, replication, blockSize, null);
1008      }
1009    
1010      /**
1011       * Call {@link #create(String, boolean, short, long, Progressable, int)}
1012       * with default bufferSize.
1013       */
1014      public OutputStream create(String src, boolean overwrite, short replication,
1015          long blockSize, Progressable progress) throws IOException {
1016        return create(src, overwrite, replication, blockSize, progress,
1017            dfsClientConf.ioBufferSize);
1018      }
1019    
1020      /**
1021       * Call {@link #create(String, FsPermission, EnumSet, short, long, 
1022       * Progressable, int)} with default <code>permission</code>
1023       * {@link FsPermission#getDefault()}.
1024       * 
1025       * @param src File name
1026       * @param overwrite overwrite an existing file if true
1027       * @param replication replication factor for the file
1028       * @param blockSize maximum block size
1029       * @param progress interface for reporting client progress
1030       * @param buffersize underlying buffersize
1031       * 
1032       * @return output stream
1033       */
1034      public OutputStream create(String src,
1035                                 boolean overwrite,
1036                                 short replication,
1037                                 long blockSize,
1038                                 Progressable progress,
1039                                 int buffersize)
1040          throws IOException {
1041        return create(src, FsPermission.getDefault(),
1042            overwrite ? EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE)
1043                : EnumSet.of(CreateFlag.CREATE), replication, blockSize, progress,
1044            buffersize);
1045      }
1046    
1047      /**
1048       * Call {@link #create(String, FsPermission, EnumSet, boolean, short, 
1049       * long, Progressable, int)} with <code>createParent</code> set to true.
1050       */
1051      public DFSOutputStream create(String src, 
1052                                 FsPermission permission,
1053                                 EnumSet<CreateFlag> flag, 
1054                                 short replication,
1055                                 long blockSize,
1056                                 Progressable progress,
1057                                 int buffersize)
1058          throws IOException {
1059        return create(src, permission, flag, true,
1060            replication, blockSize, progress, buffersize);
1061      }
1062    
1063      /**
1064       * Create a new dfs file with the specified block replication 
   * and write-progress reporting, and return an output stream for writing
1066       * into the file.  
1067       * 
1068       * @param src File name
   * @param permission The permission of the file being created.
1070       *          If null, use default permission {@link FsPermission#getDefault()}
1071       * @param flag indicates create a new file or create/overwrite an
1072       *          existing file or append to an existing file
1073       * @param createParent create missing parent directory if true
1074       * @param replication block replication
1075       * @param blockSize maximum block size
1076       * @param progress interface for reporting client progress
1077       * @param buffersize underlying buffer size 
1078       * 
1079       * @return output stream
1080       * 
1081       * @see ClientProtocol#create(String, FsPermission, String, EnumSetWritable,
1082       * boolean, short, long) for detailed description of exceptions thrown
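   *
   * A hedged usage sketch (all argument values below are illustrative):
   * <pre>
   *   DFSOutputStream out = dfsClient.create("/tmp/example.txt",
   *       FsPermission.getDefault(),
   *       EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE),
   *       true,                  // createParent
   *       (short) 3,             // replication
   *       128L * 1024 * 1024,    // blockSize
   *       null,                  // progress
   *       4096);                 // buffersize
   * </pre>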
1083       */
1084      public DFSOutputStream create(String src, 
1085                                 FsPermission permission,
1086                                 EnumSet<CreateFlag> flag, 
1087                                 boolean createParent,
1088                                 short replication,
1089                                 long blockSize,
1090                                 Progressable progress,
1091                                 int buffersize)
1092        throws IOException {
1093        checkOpen();
1094        if (permission == null) {
1095          permission = FsPermission.getDefault();
1096        }
1097        FsPermission masked = permission.applyUMask(dfsClientConf.uMask);
1098        if(LOG.isDebugEnabled()) {
1099          LOG.debug(src + ": masked=" + masked);
1100        }
1101        final DFSOutputStream result = DFSOutputStream.newStreamForCreate(this,
1102            src, masked, flag, createParent, replication, blockSize, progress,
1103            buffersize, dfsClientConf.createChecksum());
1104        leaserenewer.put(src, result, this);
1105        return result;
1106      }
1107      
1108      /**
1109       * Append to an existing file if {@link CreateFlag#APPEND} is present
1110       */
1111      private DFSOutputStream primitiveAppend(String src, EnumSet<CreateFlag> flag,
1112          int buffersize, Progressable progress) throws IOException {
1113        if (flag.contains(CreateFlag.APPEND)) {
1114          HdfsFileStatus stat = getFileInfo(src);
1115          if (stat == null) { // No file to append to
1116            // New file needs to be created if create option is present
1117            if (!flag.contains(CreateFlag.CREATE)) {
1118              throw new FileNotFoundException("failed to append to non-existent file "
1119                  + src + " on client " + clientName);
1120            }
1121            return null;
1122          }
1123          return callAppend(stat, src, buffersize, progress);
1124        }
1125        return null;
1126      }
1127      
1128      /**
   * Same as {@link #create(String, FsPermission, EnumSet, short, long,
   *  Progressable, int)} except that the permission
   *  is absolute (i.e. it has already been masked with the umask).
1132       */
1133      public DFSOutputStream primitiveCreate(String src, 
1134                                 FsPermission absPermission,
1135                                 EnumSet<CreateFlag> flag,
1136                                 boolean createParent,
1137                                 short replication,
1138                                 long blockSize,
1139                                 Progressable progress,
1140                                 int buffersize,
1141                                 int bytesPerChecksum)
1142          throws IOException, UnresolvedLinkException {
1143        checkOpen();
1144        CreateFlag.validate(flag);
1145        DFSOutputStream result = primitiveAppend(src, flag, buffersize, progress);
1146        if (result == null) {
1147          DataChecksum checksum = DataChecksum.newDataChecksum(
1148              dfsClientConf.checksumType,
1149              bytesPerChecksum);
1150          result = DFSOutputStream.newStreamForCreate(this, src, absPermission,
1151              flag, createParent, replication, blockSize, progress, buffersize,
1152              checksum);
1153        }
1154        leaserenewer.put(src, result, this);
1155        return result;
1156      }
1157      
1158      /**
1159       * Creates a symbolic link.
1160       * 
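   * A minimal usage sketch (both paths are illustrative):
   * <pre>
   *   dfsClient.createSymlink("/tmp/target", "/tmp/link", true);
   * </pre>
   *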
   * @see ClientProtocol#createSymlink(String, String, FsPermission, boolean) 
1162       */
1163      public void createSymlink(String target, String link, boolean createParent)
1164          throws IOException {
1165        try {
1166          FsPermission dirPerm = 
1167              FsPermission.getDefault().applyUMask(dfsClientConf.uMask); 
1168          namenode.createSymlink(target, link, dirPerm, createParent);
1169        } catch (RemoteException re) {
1170          throw re.unwrapRemoteException(AccessControlException.class,
1171                                         FileAlreadyExistsException.class, 
1172                                         FileNotFoundException.class,
1173                                         ParentNotDirectoryException.class,
1174                                         NSQuotaExceededException.class, 
1175                                         DSQuotaExceededException.class,
1176                                         UnresolvedPathException.class);
1177        }
1178      }
1179    
1180      /**
1181       * Resolve the *first* symlink, if any, in the path.
1182       * 
1183       * @see ClientProtocol#getLinkTarget(String)
1184       */
1185      public String getLinkTarget(String path) throws IOException { 
1186        checkOpen();
1187        try {
1188          return namenode.getLinkTarget(path);
1189        } catch (RemoteException re) {
1190          throw re.unwrapRemoteException(AccessControlException.class,
1191                                         FileNotFoundException.class);
1192        }
1193      }
1194    
  /** Get the output stream returned by an append call. */
1196      private DFSOutputStream callAppend(HdfsFileStatus stat, String src,
1197          int buffersize, Progressable progress) throws IOException {
1198        LocatedBlock lastBlock = null;
1199        try {
1200          lastBlock = namenode.append(src, clientName);
1201        } catch(RemoteException re) {
1202          throw re.unwrapRemoteException(AccessControlException.class,
1203                                         FileNotFoundException.class,
1204                                         SafeModeException.class,
1205                                         DSQuotaExceededException.class,
1206                                         UnsupportedOperationException.class,
1207                                         UnresolvedPathException.class);
1208        }
1209        return DFSOutputStream.newStreamForAppend(this, src, buffersize, progress,
1210            lastBlock, stat, dfsClientConf.createChecksum());
1211      }
1212      
1213      /**
1214       * Append to an existing HDFS file.  
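   *
   * A small usage sketch (the path and buffer size are illustrative):
   * <pre>
   *   HdfsDataOutputStream out = dfsClient.append("/tmp/example.txt", 4096, null, null);
   *   out.writeBytes("more data");
   *   out.close();
   * </pre>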
1215       * 
1216       * @param src file name
1217       * @param buffersize buffer size
1218       * @param progress for reporting write-progress; null is acceptable.
1219       * @param statistics file system statistics; null is acceptable.
1220       * @return an output stream for writing into the file
1221       * 
1222       * @see ClientProtocol#append(String, String) 
1223       */
1224      public HdfsDataOutputStream append(final String src, final int buffersize,
1225          final Progressable progress, final FileSystem.Statistics statistics
1226          ) throws IOException {
1227        final DFSOutputStream out = append(src, buffersize, progress);
1228        return new HdfsDataOutputStream(out, statistics, out.getInitialLen());
1229      }
1230    
1231      private DFSOutputStream append(String src, int buffersize, Progressable progress) 
1232          throws IOException {
1233        checkOpen();
1234        HdfsFileStatus stat = getFileInfo(src);
1235        if (stat == null) { // No file found
1236          throw new FileNotFoundException("failed to append to non-existent file "
1237              + src + " on client " + clientName);
1238        }
1239        final DFSOutputStream result = callAppend(stat, src, buffersize, progress);
1240        leaserenewer.put(src, result, this);
1241        return result;
1242      }
1243    
1244      /**
1245       * Set replication for an existing file.
1246       * @param src file name
   * @param replication the new replication factor for the file
1248       * 
1249       * @see ClientProtocol#setReplication(String, short)
1250       */
1251      public boolean setReplication(String src, short replication)
1252          throws IOException {
1253        try {
1254          return namenode.setReplication(src, replication);
1255        } catch(RemoteException re) {
1256          throw re.unwrapRemoteException(AccessControlException.class,
1257                                         FileNotFoundException.class,
1258                                         SafeModeException.class,
1259                                         DSQuotaExceededException.class,
1260                                         UnresolvedPathException.class);
1261        }
1262      }
1263    
1264      /**
1265       * Rename file or directory.
1266       * @see ClientProtocol#rename(String, String)
1267       * @deprecated Use {@link #rename(String, String, Options.Rename...)} instead.
1268       */
1269      @Deprecated
1270      public boolean rename(String src, String dst) throws IOException {
1271        checkOpen();
1272        try {
1273          return namenode.rename(src, dst);
1274        } catch(RemoteException re) {
1275          throw re.unwrapRemoteException(AccessControlException.class,
1276                                         NSQuotaExceededException.class,
1277                                         DSQuotaExceededException.class,
1278                                         UnresolvedPathException.class);
1279        }
1280      }
1281    
1282      /**
   * Move blocks from srcs to trg and delete srcs afterwards.
1284       * See {@link ClientProtocol#concat(String, String [])}. 
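   *
   * <p>A minimal usage sketch (illustrative only; assumes an already
   * constructed {@code DFSClient dfs} and source files that satisfy the
   * namenode's preconditions for concat):
   * <pre>
   * dfs.concat("/tmp/target", new String[] { "/tmp/part1", "/tmp/part2" });
   * </pre>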
1285       */
1286      public void concat(String trg, String [] srcs) throws IOException {
1287        checkOpen();
1288        try {
1289          namenode.concat(trg, srcs);
1290        } catch(RemoteException re) {
1291          throw re.unwrapRemoteException(AccessControlException.class,
1292                                         UnresolvedPathException.class);
1293        }
1294      }
1295      /**
1296       * Rename file or directory.
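   *
   * <p>A minimal usage sketch (illustrative only; assumes an already
   * constructed {@code DFSClient dfs}; OVERWRITE replaces {@code "/tmp/b"}
   * if it already exists):
   * <pre>
   * dfs.rename("/tmp/a", "/tmp/b", Options.Rename.OVERWRITE);
   * </pre>
   *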
1297       * @see ClientProtocol#rename2(String, String, Options.Rename...)
1298       */
1299      public void rename(String src, String dst, Options.Rename... options)
1300          throws IOException {
1301        checkOpen();
1302        try {
1303          namenode.rename2(src, dst, options);
1304        } catch(RemoteException re) {
1305          throw re.unwrapRemoteException(AccessControlException.class,
1306                                         DSQuotaExceededException.class,
1307                                         FileAlreadyExistsException.class,
1308                                         FileNotFoundException.class,
1309                                         ParentNotDirectoryException.class,
1310                                         SafeModeException.class,
1311                                         NSQuotaExceededException.class,
1312                                         UnresolvedPathException.class);
1313        }
1314      }
1315      /**
   * Delete file or directory.
   * See {@link ClientProtocol#delete(String)}. 
   * @deprecated Use {@link #delete(String, boolean)} instead.
1318       */
1319      @Deprecated
1320      public boolean delete(String src) throws IOException {
1321        checkOpen();
1322        return namenode.delete(src, true);
1323      }
1324    
1325      /**
   * Delete a file or directory.
   * If the target is a non-empty directory, its contents are deleted
   * only when recursive is set to true.
1329       *
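   * <p>A minimal usage sketch (illustrative only; assumes an already
   * constructed {@code DFSClient dfs}):
   * <pre>
   * boolean deleted = dfs.delete("/tmp/dir", true); // recursive delete
   * </pre>
   *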
1330       * @see ClientProtocol#delete(String, boolean)
1331       */
1332      public boolean delete(String src, boolean recursive) throws IOException {
1333        checkOpen();
1334        try {
1335          return namenode.delete(src, recursive);
1336        } catch(RemoteException re) {
1337          throw re.unwrapRemoteException(AccessControlException.class,
1338                                         FileNotFoundException.class,
1339                                         SafeModeException.class,
1340                                         UnresolvedPathException.class);
1341        }
1342      }
1343      
  /** Checks whether the given path exists. Implemented using getFileInfo(src).
   */
1346      public boolean exists(String src) throws IOException {
1347        checkOpen();
1348        return getFileInfo(src) != null;
1349      }
1350    
1351      /**
   * Get a partial listing of the indicated directory.
   * No block locations are fetched.
1354       */
1355      public DirectoryListing listPaths(String src,  byte[] startAfter)
1356        throws IOException {
1357        return listPaths(src, startAfter, false);
1358      }
1359      
1360      /**
   * Get a partial listing of the indicated directory.
   *
   * Pass HdfsFileStatus.EMPTY_NAME as startAfter if the application
   * wants to fetch a listing starting from the first entry in the
   * directory.
1366       *
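   * <p>A minimal paging sketch (illustrative only; assumes an already
   * constructed {@code DFSClient dfs} and an existing directory
   * {@code "/tmp/dir"}):
   * <pre>
   * DirectoryListing page =
   *     dfs.listPaths("/tmp/dir", HdfsFileStatus.EMPTY_NAME, false);
   * while (page != null) {
   *   for (HdfsFileStatus stat : page.getPartialListing()) {
   *     System.out.println(stat.getLocalName());
   *   }
   *   if (!page.hasMore()) {
   *     break;
   *   }
   *   page = dfs.listPaths("/tmp/dir", page.getLastName(), false);
   * }
   * </pre>
   *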
1367       * @see ClientProtocol#getListing(String, byte[], boolean)
1368       */
1369      public DirectoryListing listPaths(String src,  byte[] startAfter,
1370          boolean needLocation) 
1371        throws IOException {
1372        checkOpen();
1373        try {
1374          return namenode.getListing(src, startAfter, needLocation);
1375        } catch(RemoteException re) {
1376          throw re.unwrapRemoteException(AccessControlException.class,
1377                                         FileNotFoundException.class,
1378                                         UnresolvedPathException.class);
1379        }
1380      }
1381    
1382      /**
1383       * Get the file info for a specific file or directory.
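   *
   * <p>A minimal usage sketch (illustrative only; assumes an already
   * constructed {@code DFSClient dfs}):
   * <pre>
   * HdfsFileStatus stat = dfs.getFileInfo("/tmp/f");
   * if (stat != null) {
   *   System.out.println("len=" + stat.getLen() + ", dir=" + stat.isDir());
   * }
   * </pre>
   *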
1384       * @param src The string representation of the path to the file
1385       * @return object containing information regarding the file
1386       *         or null if file not found
1387       *         
1388       * @see ClientProtocol#getFileInfo(String) for description of exceptions
1389       */
1390      public HdfsFileStatus getFileInfo(String src) throws IOException {
1391        checkOpen();
1392        try {
1393          return namenode.getFileInfo(src);
1394        } catch(RemoteException re) {
1395          throw re.unwrapRemoteException(AccessControlException.class,
1396                                         FileNotFoundException.class,
1397                                         UnresolvedPathException.class);
1398        }
1399      }
1400    
1401      /**
1402       * Get the file info for a specific file or directory. If src
1403       * refers to a symlink then the FileStatus of the link is returned.
1404       * @param src path to a file or directory.
1405       * 
   * For a description of the exceptions thrown,
1407       * @see ClientProtocol#getFileLinkInfo(String)
1408       */
1409      public HdfsFileStatus getFileLinkInfo(String src) throws IOException {
1410        checkOpen();
1411        try {
1412          return namenode.getFileLinkInfo(src);
1413        } catch(RemoteException re) {
1414          throw re.unwrapRemoteException(AccessControlException.class,
1415                                         UnresolvedPathException.class);
    }
  }
1418    
1419      /**
1420       * Get the checksum of a file.
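   *
   * <p>A minimal usage sketch (illustrative only; assumes an already
   * constructed {@code DFSClient dfs} and an existing, closed file
   * {@code "/tmp/f"}):
   * <pre>
   * MD5MD5CRC32FileChecksum checksum = dfs.getFileChecksum("/tmp/f");
   * System.out.println(checksum);
   * </pre>
   *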
1421       * @param src The file path
1422       * @return The checksum 
1423       * @see DistributedFileSystem#getFileChecksum(Path)
1424       */
1425      public MD5MD5CRC32FileChecksum getFileChecksum(String src) throws IOException {
1426        checkOpen();
1427        return getFileChecksum(src, namenode, socketFactory, dfsClientConf.socketTimeout);    
1428      }
1429    
1430      /**
1431       * Get the checksum of a file.
   * @param src The file path
   * @param namenode the namenode used to resolve the file's block locations
   * @param socketFactory the factory used to create sockets to the datanodes
   * @param socketTimeout the socket connect/read timeout, in milliseconds
   * @return The checksum
1434       */
1435      public static MD5MD5CRC32FileChecksum getFileChecksum(String src,
1436          ClientProtocol namenode, SocketFactory socketFactory, int socketTimeout
1437          ) throws IOException {
1438        //get all block locations
1439        LocatedBlocks blockLocations = callGetBlockLocations(namenode, src, 0, Long.MAX_VALUE);
1440        if (null == blockLocations) {
1441          throw new FileNotFoundException("File does not exist: " + src);
1442        }
1443        List<LocatedBlock> locatedblocks = blockLocations.getLocatedBlocks();
1444        final DataOutputBuffer md5out = new DataOutputBuffer();
1445        int bytesPerCRC = 0;
1446        long crcPerBlock = 0;
1447        boolean refetchBlocks = false;
1448        int lastRetriedIndex = -1;
1449    
1450        //get block checksum for each block
1451        for(int i = 0; i < locatedblocks.size(); i++) {
1452          if (refetchBlocks) {  // refetch to get fresh tokens
1453            blockLocations = callGetBlockLocations(namenode, src, 0, Long.MAX_VALUE);
1454            if (null == blockLocations) {
1455              throw new FileNotFoundException("File does not exist: " + src);
1456            }
1457            locatedblocks = blockLocations.getLocatedBlocks();
1458            refetchBlocks = false;
1459          }
1460          LocatedBlock lb = locatedblocks.get(i);
1461          final ExtendedBlock block = lb.getBlock();
1462          final DatanodeInfo[] datanodes = lb.getLocations();
1463          
1464          //try each datanode location of the block
1465          final int timeout = 3000 * datanodes.length + socketTimeout;
1466          boolean done = false;
1467          for(int j = 0; !done && j < datanodes.length; j++) {
1468            Socket sock = null;
1469            DataOutputStream out = null;
1470            DataInputStream in = null;
1471            
1472            try {
1473              //connect to a datanode
1474              sock = socketFactory.createSocket();
1475              NetUtils.connect(sock,
1476                  NetUtils.createSocketAddr(datanodes[j].getXferAddr()),
1477                  timeout);
1478              sock.setSoTimeout(timeout);
1479    
1480              out = new DataOutputStream(
1481                  new BufferedOutputStream(NetUtils.getOutputStream(sock), 
1482                                           HdfsConstants.SMALL_BUFFER_SIZE));
1483              in = new DataInputStream(NetUtils.getInputStream(sock));
1484    
1485              if (LOG.isDebugEnabled()) {
1486                LOG.debug("write to " + datanodes[j] + ": "
1487                    + Op.BLOCK_CHECKSUM + ", block=" + block);
1488              }
1489              // get block MD5
1490              new Sender(out).blockChecksum(block, lb.getBlockToken());
1491    
1492              final BlockOpResponseProto reply =
1493                BlockOpResponseProto.parseFrom(HdfsProtoUtil.vintPrefixed(in));
1494    
1495              if (reply.getStatus() != Status.SUCCESS) {
1496                if (reply.getStatus() == Status.ERROR_ACCESS_TOKEN
1497                    && i > lastRetriedIndex) {
1498                  if (LOG.isDebugEnabled()) {
1499                    LOG.debug("Got access token error in response to OP_BLOCK_CHECKSUM "
1500                        + "for file " + src + " for block " + block
1501                        + " from datanode " + datanodes[j]
1502                        + ". Will retry the block once.");
1503                  }
1504                  lastRetriedIndex = i;
1505                  done = true; // actually it's not done; but we'll retry
1506                  i--; // repeat at i-th block
1507                  refetchBlocks = true;
1508                  break;
1509                } else {
1510                  throw new IOException("Bad response " + reply + " for block "
1511                      + block + " from datanode " + datanodes[j]);
1512                }
1513              }
1514              
1515              OpBlockChecksumResponseProto checksumData =
1516                reply.getChecksumResponse();
1517    
1518              //read byte-per-checksum
1519              final int bpc = checksumData.getBytesPerCrc();
1520              if (i == 0) { //first block
1521                bytesPerCRC = bpc;
1522              }
1523              else if (bpc != bytesPerCRC) {
1524                throw new IOException("Byte-per-checksum not matched: bpc=" + bpc
1525                    + " but bytesPerCRC=" + bytesPerCRC);
1526              }
1527              
1528              //read crc-per-block
1529              final long cpb = checksumData.getCrcPerBlock();
1530              if (locatedblocks.size() > 1 && i == 0) {
1531                crcPerBlock = cpb;
1532              }
1533    
1534              //read md5
1535              final MD5Hash md5 = new MD5Hash(
1536                  checksumData.getMd5().toByteArray());
1537              md5.write(md5out);
1538              
1539              done = true;
1540    
1541              if (LOG.isDebugEnabled()) {
1542                if (i == 0) {
1543                  LOG.debug("set bytesPerCRC=" + bytesPerCRC
1544                      + ", crcPerBlock=" + crcPerBlock);
1545                }
1546                LOG.debug("got reply from " + datanodes[j] + ": md5=" + md5);
1547              }
1548            } catch (IOException ie) {
1549              LOG.warn("src=" + src + ", datanodes["+j+"]=" + datanodes[j], ie);
1550            } finally {
1551              IOUtils.closeStream(in);
1552              IOUtils.closeStream(out);
1553              IOUtils.closeSocket(sock);        
1554            }
1555          }
1556    
1557          if (!done) {
        throw new IOException("Failed to get block MD5 for " + block);
1559          }
1560        }
1561    
1562        //compute file MD5
1563        final MD5Hash fileMD5 = MD5Hash.digest(md5out.getData()); 
1564        return new MD5MD5CRC32FileChecksum(bytesPerCRC, crcPerBlock, fileMD5);
1565      }
1566    
1567      /**
1568       * Set permissions to a file or directory.
1569       * @param src path name.
   * @param permission the permission to be set
1571       * 
1572       * @see ClientProtocol#setPermission(String, FsPermission)
1573       */
1574      public void setPermission(String src, FsPermission permission)
1575          throws IOException {
1576        checkOpen();
1577        try {
1578          namenode.setPermission(src, permission);
1579        } catch(RemoteException re) {
1580          throw re.unwrapRemoteException(AccessControlException.class,
1581                                         FileNotFoundException.class,
1582                                         SafeModeException.class,
1583                                         UnresolvedPathException.class);
1584        }
1585      }
1586    
1587      /**
1588       * Set file or directory owner.
1589       * @param src path name.
1590       * @param username user id.
1591       * @param groupname user group.
1592       * 
1593       * @see ClientProtocol#setOwner(String, String, String)
1594       */
1595      public void setOwner(String src, String username, String groupname)
1596          throws IOException {
1597        checkOpen();
1598        try {
1599          namenode.setOwner(src, username, groupname);
1600        } catch(RemoteException re) {
1601          throw re.unwrapRemoteException(AccessControlException.class,
1602                                         FileNotFoundException.class,
1603                                         SafeModeException.class,
1604                                         UnresolvedPathException.class);                                   
1605        }
1606      }
1607    
1608      /**
1609       * @see ClientProtocol#getStats()
1610       */
1611      public FsStatus getDiskStatus() throws IOException {
1612        long rawNums[] = namenode.getStats();
1613        return new FsStatus(rawNums[0], rawNums[1], rawNums[2]);
1614      }
1615    
1616      /**
1617       * Returns count of blocks with no good replicas left. Normally should be 
1618       * zero.
1619       * @throws IOException
1620       */ 
1621      public long getMissingBlocksCount() throws IOException {
1622        return namenode.getStats()[ClientProtocol.GET_STATS_MISSING_BLOCKS_IDX];
1623      }
1624      
1625      /**
   * Returns count of blocks with one or more replicas missing.
1627       * @throws IOException
1628       */ 
1629      public long getUnderReplicatedBlocksCount() throws IOException {
1630        return namenode.getStats()[ClientProtocol.GET_STATS_UNDER_REPLICATED_IDX];
1631      }
1632      
1633      /**
1634       * Returns count of blocks with at least one replica marked corrupt. 
1635       * @throws IOException
1636       */ 
1637      public long getCorruptBlocksCount() throws IOException {
1638        return namenode.getStats()[ClientProtocol.GET_STATS_CORRUPT_BLOCKS_IDX];
1639      }
1640      
1641      /**
1642       * @return a list in which each entry describes a corrupt file/block
1643       * @throws IOException
1644       */
1645      public CorruptFileBlocks listCorruptFileBlocks(String path,
1646                                                     String cookie)
1647        throws IOException {
1648        return namenode.listCorruptFileBlocks(path, cookie);
1649      }
1650    
1651      public DatanodeInfo[] datanodeReport(DatanodeReportType type)
1652      throws IOException {
1653        return namenode.getDatanodeReport(type);
1654      }
1655        
1656      /**
1657       * Enter, leave or get safe mode.
1658       * 
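   * <p>A minimal usage sketch (illustrative only; assumes an already
   * constructed {@code DFSClient dfs}; SAFEMODE_GET only queries the state):
   * <pre>
   * boolean inSafeMode = dfs.setSafeMode(SafeModeAction.SAFEMODE_GET);
   * </pre>
   *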
1659       * @see ClientProtocol#setSafeMode(HdfsConstants.SafeModeAction)
1660       */
1661      public boolean setSafeMode(SafeModeAction action) throws IOException {
1662        return namenode.setSafeMode(action);
1663      }
1664    
1665      /**
1666       * Save namespace image.
1667       * 
1668       * @see ClientProtocol#saveNamespace()
1669       */
1670      void saveNamespace() throws AccessControlException, IOException {
1671        try {
1672          namenode.saveNamespace();
1673        } catch(RemoteException re) {
1674          throw re.unwrapRemoteException(AccessControlException.class);
1675        }
1676      }
1677      
1678      /**
   * Enable/disable restoring failed storage.
1680       * 
1681       * @see ClientProtocol#restoreFailedStorage(String arg)
1682       */
1683      boolean restoreFailedStorage(String arg)
1684          throws AccessControlException, IOException{
1685        return namenode.restoreFailedStorage(arg);
1686      }
1687    
1688      /**
1689       * Refresh the hosts and exclude files.  (Rereads them.)
1690       * See {@link ClientProtocol#refreshNodes()} 
1691       * for more details.
1692       * 
1693       * @see ClientProtocol#refreshNodes()
1694       */
1695      public void refreshNodes() throws IOException {
1696        namenode.refreshNodes();
1697      }
1698    
1699      /**
1700       * Dumps DFS data structures into specified file.
1701       * 
1702       * @see ClientProtocol#metaSave(String)
1703       */
1704      public void metaSave(String pathname) throws IOException {
1705        namenode.metaSave(pathname);
1706      }
1707    
1708      /**
1709       * Requests the namenode to tell all datanodes to use a new, non-persistent
1710       * bandwidth value for dfs.balance.bandwidthPerSec.
1711       * See {@link ClientProtocol#setBalancerBandwidth(long)} 
1712       * for more details.
1713       * 
1714       * @see ClientProtocol#setBalancerBandwidth(long)
1715       */
1716      public void setBalancerBandwidth(long bandwidth) throws IOException {
1717        namenode.setBalancerBandwidth(bandwidth);
1718      }
1719        
1720      /**
1721       * @see ClientProtocol#finalizeUpgrade()
1722       */
1723      public void finalizeUpgrade() throws IOException {
1724        namenode.finalizeUpgrade();
1725      }
1726    
1727      /**
1728       * @see ClientProtocol#distributedUpgradeProgress(HdfsConstants.UpgradeAction)
1729       */
1730      public UpgradeStatusReport distributedUpgradeProgress(UpgradeAction action)
1731          throws IOException {
1732        return namenode.distributedUpgradeProgress(action);
1733      }
1734    
  /**
   * @deprecated Use {@link #mkdirs(String, FsPermission, boolean)} instead.
   */
1737      @Deprecated
1738      public boolean mkdirs(String src) throws IOException {
1739        return mkdirs(src, null, true);
1740      }
1741    
1742      /**
1743       * Create a directory (or hierarchy of directories) with the given
1744       * name and permission.
1745       *
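   * <p>A minimal usage sketch (illustrative only; assumes an already
   * constructed {@code DFSClient dfs}):
   * <pre>
   * boolean created =
   *     dfs.mkdirs("/tmp/a/b", new FsPermission((short) 0755), true);
   * </pre>
   *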
1746       * @param src The path of the directory being created
1747       * @param permission The permission of the directory being created.
1748       * If permission == null, use {@link FsPermission#getDefault()}.
1749       * @param createParent create missing parent directory if true
1750       * 
   * @return True if the operation succeeds.
1752       * 
1753       * @see ClientProtocol#mkdirs(String, FsPermission, boolean)
1754       */
1755      public boolean mkdirs(String src, FsPermission permission,
1756          boolean createParent) throws IOException {
1757        checkOpen();
1758        if (permission == null) {
1759          permission = FsPermission.getDefault();
1760        }
1761        FsPermission masked = permission.applyUMask(dfsClientConf.uMask);
1762        if(LOG.isDebugEnabled()) {
1763          LOG.debug(src + ": masked=" + masked);
1764        }
1765        try {
1766          return namenode.mkdirs(src, masked, createParent);
1767        } catch(RemoteException re) {
1768          throw re.unwrapRemoteException(AccessControlException.class,
1769                                         InvalidPathException.class,
1770                                         FileAlreadyExistsException.class,
1771                                         FileNotFoundException.class,
1772                                         ParentNotDirectoryException.class,
1773                                         SafeModeException.class,
1774                                         NSQuotaExceededException.class,
1775                                         UnresolvedPathException.class);
1776        }
1777      }
1778      
1779      /**
   * Same as {@link #mkdirs(String, FsPermission, boolean)} except
   * that the permission has already been masked against the umask.
1782       */
1783      public boolean primitiveMkdir(String src, FsPermission absPermission)
1784        throws IOException {
1785        checkOpen();
1786        if (absPermission == null) {
1787          absPermission = 
1788            FsPermission.getDefault().applyUMask(dfsClientConf.uMask);
1789        } 
1790    
1791        if(LOG.isDebugEnabled()) {
1792          LOG.debug(src + ": masked=" + absPermission);
1793        }
1794        try {
1795          return namenode.mkdirs(src, absPermission, true);
1796        } catch(RemoteException re) {
1797          throw re.unwrapRemoteException(AccessControlException.class,
1798                                         NSQuotaExceededException.class,
1799                                         DSQuotaExceededException.class,
1800                                         UnresolvedPathException.class);
1801        }
1802      }
1803    
1804      /**
1805       * Get {@link ContentSummary} rooted at the specified directory.
   * @param src The string representation of the path
1807       * 
1808       * @see ClientProtocol#getContentSummary(String)
1809       */
1810      ContentSummary getContentSummary(String src) throws IOException {
1811        try {
1812          return namenode.getContentSummary(src);
1813        } catch(RemoteException re) {
1814          throw re.unwrapRemoteException(AccessControlException.class,
1815                                         FileNotFoundException.class,
1816                                         UnresolvedPathException.class);
1817        }
1818      }
1819    
1820      /**
1821       * Sets or resets quotas for a directory.
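   *
   * <p>A minimal usage sketch (illustrative only; note this method has
   * package-private access, so the caller is assumed to be in the same
   * package as an already constructed {@code DFSClient dfs}; the diskspace
   * quota is left unchanged):
   * <pre>
   * dfs.setQuota("/tmp/dir", 1000, HdfsConstants.QUOTA_DONT_SET);
   * </pre>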
1822       * @see ClientProtocol#setQuota(String, long, long)
1823       */
1824      void setQuota(String src, long namespaceQuota, long diskspaceQuota) 
1825          throws IOException {
1826        // sanity check
1827        if ((namespaceQuota <= 0 && namespaceQuota != HdfsConstants.QUOTA_DONT_SET &&
1828             namespaceQuota != HdfsConstants.QUOTA_RESET) ||
1829            (diskspaceQuota <= 0 && diskspaceQuota != HdfsConstants.QUOTA_DONT_SET &&
1830             diskspaceQuota != HdfsConstants.QUOTA_RESET)) {
1831          throw new IllegalArgumentException("Invalid values for quota : " +
1832                                             namespaceQuota + " and " + 
1833                                             diskspaceQuota);
1834                                             
1835        }
1836        try {
1837          namenode.setQuota(src, namespaceQuota, diskspaceQuota);
1838        } catch(RemoteException re) {
1839          throw re.unwrapRemoteException(AccessControlException.class,
1840                                         FileNotFoundException.class,
1841                                         NSQuotaExceededException.class,
1842                                         DSQuotaExceededException.class,
1843                                         UnresolvedPathException.class);
1844        }
1845      }
1846    
1847      /**
   * Set the modification and access times of a file.
1849       * 
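   * <p>A minimal usage sketch (illustrative only; assumes an already
   * constructed {@code DFSClient dfs}; sets both times to the current time):
   * <pre>
   * long now = System.currentTimeMillis();
   * dfs.setTimes("/tmp/f", now, now);
   * </pre>
   *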
1850       * @see ClientProtocol#setTimes(String, long, long)
1851       */
1852      public void setTimes(String src, long mtime, long atime) throws IOException {
1853        checkOpen();
1854        try {
1855          namenode.setTimes(src, mtime, atime);
1856        } catch(RemoteException re) {
1857          throw re.unwrapRemoteException(AccessControlException.class,
1858                                         FileNotFoundException.class,
1859                                         UnresolvedPathException.class);
1860        }
1861      }
1862    
1863      /**
1864       * @deprecated use {@link HdfsDataInputStream} instead.
1865       */
1866      @Deprecated
1867      public static class DFSDataInputStream extends HdfsDataInputStream {
1868    
1869        public DFSDataInputStream(DFSInputStream in) throws IOException {
1870          super(in);
1871        }
1872      }
1873      
1874      boolean shouldTryShortCircuitRead(InetSocketAddress targetAddr) {
1875        if (shortCircuitLocalReads && isLocalAddress(targetAddr)) {
1876          return true;
1877        }
1878        return false;
1879      }
1880    
1881      void reportChecksumFailure(String file, ExtendedBlock blk, DatanodeInfo dn) {
1882        DatanodeInfo [] dnArr = { dn };
1883        LocatedBlock [] lblocks = { new LocatedBlock(blk, dnArr) };
1884        reportChecksumFailure(file, lblocks);
1885      }
1886        
1887      // just reports checksum failure and ignores any exception during the report.
1888      void reportChecksumFailure(String file, LocatedBlock lblocks[]) {
1889        try {
1890          reportBadBlocks(lblocks);
1891        } catch (IOException ie) {
1892          LOG.info("Found corruption while reading " + file
1893              + ".  Error repairing corrupt blocks.  Bad blocks remain.", ie);
1894        }
1895      }
1896    
1897      @Override
1898      public String toString() {
1899        return getClass().getSimpleName() + "[clientName=" + clientName
1900            + ", ugi=" + ugi + "]"; 
1901      }
1902    
1903      void disableShortCircuit() {
1904        shortCircuitLocalReads = false;
1905      }
1906    }