001    /**
002     * Licensed to the Apache Software Foundation (ASF) under one
003     * or more contributor license agreements.  See the NOTICE file
004     * distributed with this work for additional information
005     * regarding copyright ownership.  The ASF licenses this file
006     * to you under the Apache License, Version 2.0 (the
007     * "License"); you may not use this file except in compliance
008     * with the License.  You may obtain a copy of the License at
009     *
010     *     http://www.apache.org/licenses/LICENSE-2.0
011     *
012     * Unless required by applicable law or agreed to in writing, software
013     * distributed under the License is distributed on an "AS IS" BASIS,
014     * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
015     * See the License for the specific language governing permissions and
016     * limitations under the License.
017     */
018    
019    package org.apache.hadoop.hdfs;
020    
021    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ADMIN;
022    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_HTTPS_NEED_AUTH_DEFAULT;
023    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_HTTPS_NEED_AUTH_KEY;
024    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX;
025    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_NAMENODE_ID_KEY;
026    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_BACKUP_ADDRESS_KEY;
027    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_DEFAULT;
028    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY;
029    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_DEFAULT;
030    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY;
031    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY;
032    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY;
033    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY;
034    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMESERVICES;
035    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMESERVICE_ID;
036    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SERVER_HTTPS_KEYPASSWORD_KEY;
037    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_PASSWORD_KEY;
038    import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SERVER_HTTPS_TRUSTSTORE_PASSWORD_KEY;
039    
040    import java.io.IOException;
041    import java.io.PrintStream;
042    import java.io.UnsupportedEncodingException;
043    import java.net.InetAddress;
044    import java.net.InetSocketAddress;
045    import java.net.URI;
046    import java.net.URISyntaxException;
047    import java.security.SecureRandom;
048    import java.text.SimpleDateFormat;
049    import java.util.Arrays;
050    import java.util.Collection;
051    import java.util.Collections;
052    import java.util.Comparator;
053    import java.util.Date;
054    import java.util.HashSet;
055    import java.util.List;
056    import java.util.Locale;
057    import java.util.Map;
058    import java.util.Random;
059    import java.util.Set;
060    
061    import javax.net.SocketFactory;
062    
063    import com.google.common.collect.Sets;
064    import org.apache.commons.cli.CommandLine;
065    import org.apache.commons.cli.CommandLineParser;
066    import org.apache.commons.cli.Option;
067    import org.apache.commons.cli.Options;
068    import org.apache.commons.cli.ParseException;
069    import org.apache.commons.cli.PosixParser;
070    import org.apache.commons.logging.Log;
071    import org.apache.commons.logging.LogFactory;
072    import org.apache.hadoop.HadoopIllegalArgumentException;
073    import org.apache.hadoop.classification.InterfaceAudience;
074    import org.apache.hadoop.conf.Configuration;
075    import org.apache.hadoop.crypto.key.KeyProvider;
076    import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
077    import org.apache.hadoop.crypto.key.KeyProviderFactory;
078    import org.apache.hadoop.fs.BlockLocation;
079    import org.apache.hadoop.fs.CommonConfigurationKeys;
080    import org.apache.hadoop.fs.FileSystem;
081    import org.apache.hadoop.fs.Path;
082    import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
083    import org.apache.hadoop.hdfs.protocol.DatanodeID;
084    import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
085    import org.apache.hadoop.hdfs.protocol.HdfsConstants;
086    import org.apache.hadoop.hdfs.protocol.LocatedBlock;
087    import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
088    import org.apache.hadoop.hdfs.protocolPB.ClientDatanodeProtocolTranslatorPB;
089    import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
090    import org.apache.hadoop.hdfs.server.namenode.NameNode;
091    import org.apache.hadoop.hdfs.web.SWebHdfsFileSystem;
092    import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
093    import org.apache.hadoop.http.HttpConfig;
094    import org.apache.hadoop.http.HttpServer2;
095    import org.apache.hadoop.ipc.ProtobufRpcEngine;
096    import org.apache.hadoop.ipc.RPC;
097    import org.apache.hadoop.net.NetUtils;
098    import org.apache.hadoop.net.NodeBase;
099    import org.apache.hadoop.security.SecurityUtil;
100    import org.apache.hadoop.security.UserGroupInformation;
101    import org.apache.hadoop.security.authorize.AccessControlList;
102    import org.apache.hadoop.util.StringUtils;
103    import org.apache.hadoop.util.ToolRunner;
104    
105    import com.google.common.annotations.VisibleForTesting;
106    import com.google.common.base.Charsets;
107    import com.google.common.base.Joiner;
108    import com.google.common.base.Preconditions;
109    import com.google.common.collect.Lists;
110    import com.google.common.collect.Maps;
111    import com.google.common.primitives.SignedBytes;
112    import com.google.protobuf.BlockingService;
113    
114    @InterfaceAudience.Private
115    public class DFSUtil {
116      public static final Log LOG = LogFactory.getLog(DFSUtil.class.getName());
117      
118      public static final byte[] EMPTY_BYTES = {};
119    
120      /** Compare two byte arrays by lexicographical order. */
121      public static int compareBytes(byte[] left, byte[] right) {
122        if (left == null) {
123          left = EMPTY_BYTES;
124        }
125        if (right == null) {
126          right = EMPTY_BYTES;
127        }
128        return SignedBytes.lexicographicalComparator().compare(left, right);
129      }
130    
131      private DFSUtil() { /* Hidden constructor */ }
132      private static final ThreadLocal<Random> RANDOM = new ThreadLocal<Random>() {
133        @Override
134        protected Random initialValue() {
135          return new Random();
136        }
137      };
138      
139      private static final ThreadLocal<SecureRandom> SECURE_RANDOM = new ThreadLocal<SecureRandom>() {
140        @Override
141        protected SecureRandom initialValue() {
142          return new SecureRandom();
143        }
144      };
145    
146      /** @return a pseudo random number generator. */
147      public static Random getRandom() {
148        return RANDOM.get();
149      }
150      
  /** @return a secure random number generator. */
152      public static SecureRandom getSecureRandom() {
153        return SECURE_RANDOM.get();
154      }
155    
156      /** Shuffle the elements in the given array. */
157      public static <T> T[] shuffle(final T[] array) {
158        if (array != null && array.length > 0) {
159          final Random random = getRandom();
160          for (int n = array.length; n > 1; ) {
161            final int randomIndex = random.nextInt(n);
162            n--;
163            if (n != randomIndex) {
164              final T tmp = array[randomIndex];
165              array[randomIndex] = array[n];
166              array[n] = tmp;
167            }
168          }
169        }
170        return array;
171      }
172    
  /**
   * Comparator for sorting DatanodeInfo[] based on decommissioned state.
   * Decommissioned nodes are moved to the end of the array on sorting with
   * this comparator.
   */
178      public static final Comparator<DatanodeInfo> DECOM_COMPARATOR = 
179        new Comparator<DatanodeInfo>() {
180          @Override
181          public int compare(DatanodeInfo a, DatanodeInfo b) {
182            return a.isDecommissioned() == b.isDecommissioned() ? 0 : 
183              a.isDecommissioned() ? 1 : -1;
184          }
185        };
186        
187          
  /**
   * Comparator for sorting DatanodeInfo[] based on decommissioned/stale states.
   * Decommissioned/stale nodes are moved to the end of the array on sorting
   * with this comparator.
   */
193      @InterfaceAudience.Private 
194      public static class DecomStaleComparator implements Comparator<DatanodeInfo> {
195        private final long staleInterval;
196    
197        /**
198         * Constructor of DecomStaleComparator
199         * 
200         * @param interval
201         *          The time interval for marking datanodes as stale is passed from
202         *          outside, since the interval may be changed dynamically
203         */
204        public DecomStaleComparator(long interval) {
205          this.staleInterval = interval;
206        }
207    
208        @Override
209        public int compare(DatanodeInfo a, DatanodeInfo b) {
210          // Decommissioned nodes will still be moved to the end of the list
211          if (a.isDecommissioned()) {
212            return b.isDecommissioned() ? 0 : 1;
213          } else if (b.isDecommissioned()) {
214            return -1;
215          }
216          // Stale nodes will be moved behind the normal nodes
217          boolean aStale = a.isStale(staleInterval);
218          boolean bStale = b.isStale(staleInterval);
219          return aStale == bStale ? 0 : (aStale ? 1 : -1);
220        }
221      }    
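  // Illustrative usage (a sketch, not part of the original source): a datanode
  // report can be sorted so that stale and decommissioned nodes end up last.
  // The report variable and the 30-second stale interval are assumed examples.
  //
  //   DatanodeInfo[] report = getDatanodeReport();   // hypothetical helper
  //   Arrays.sort(report, new DFSUtil.DecomStaleComparator(30 * 1000L));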
222        
223      /**
224       * Address matcher for matching an address to local address
225       */
226      static final AddressMatcher LOCAL_ADDRESS_MATCHER = new AddressMatcher() {
227        @Override
228        public boolean match(InetSocketAddress s) {
229          return NetUtils.isLocalAddress(s.getAddress());
230        };
231      };
232      
233      /**
234       * Whether the pathname is valid.  Currently prohibits relative paths, 
235       * names which contain a ":" or "//", or other non-canonical paths.
236       */
237      public static boolean isValidName(String src) {
238        // Path must be absolute.
239        if (!src.startsWith(Path.SEPARATOR)) {
240          return false;
241        }
242          
243        // Check for ".." "." ":" "/"
244        String[] components = StringUtils.split(src, '/');
245        for (int i = 0; i < components.length; i++) {
246          String element = components[i];
247          if (element.equals(".")  ||
248              (element.indexOf(":") >= 0)  ||
249              (element.indexOf("/") >= 0)) {
250            return false;
251          }
252          // ".." is allowed in path starting with /.reserved/.inodes
253          if (element.equals("..")) {
254            if (components.length > 4
255                && components[1].equals(FSDirectory.DOT_RESERVED_STRING)
256                && components[2].equals(FSDirectory.DOT_INODES_STRING)) {
257              continue;
258            }
259            return false;
260          }
261          // The string may start or end with a /, but not have
262          // "//" in the middle.
263          if (element.isEmpty() && i != components.length - 1 &&
264              i != 0) {
265            return false;
266          }
267        }
268        return true;
269      }
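  // Illustrative behavior of isValidName (a sketch derived from the checks
  // above, not part of the original source):
  //
  //   DFSUtil.isValidName("/user/alice/data")   // true
  //   DFSUtil.isValidName("relative/path")      // false: not absolute
  //   DFSUtil.isValidName("/a//b")              // false: "//" in the middle
  //   DFSUtil.isValidName("/a/../b")            // false: ".." outside /.reserved/.inodes
  //   DFSUtil.isValidName("/a:b")               // false: component contains ":"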
270    
271      /**
272       * Checks if a string is a valid path component. For instance, components
273       * cannot contain a ":" or "/", and cannot be equal to a reserved component
274       * like ".snapshot".
275       * <p>
276       * The primary use of this method is for validating paths when loading the
277       * FSImage. During normal NN operation, paths are sometimes allowed to
278       * contain reserved components.
279       * 
280       * @return If component is valid
281       */
282      public static boolean isValidNameForComponent(String component) {
283        if (component.equals(".") ||
284            component.equals("..") ||
285            component.indexOf(":") >= 0 ||
286            component.indexOf("/") >= 0) {
287          return false;
288        }
289        return !isReservedPathComponent(component);
290      }
291    
292    
293      /**
   * Returns whether the component is reserved.
295       * 
296       * <p>
297       * Note that some components are only reserved under certain directories, e.g.
298       * "/.reserved" is reserved, while "/hadoop/.reserved" is not.
299       * @return true, if the component is reserved
300       */
301      public static boolean isReservedPathComponent(String component) {
302        for (String reserved : HdfsConstants.RESERVED_PATH_COMPONENTS) {
303          if (component.equals(reserved)) {
304            return true;
305          }
306        }
307        return false;
308      }
309    
310      /**
311       * Converts a byte array to a string using UTF8 encoding.
312       */
313      public static String bytes2String(byte[] bytes) {
314        return bytes2String(bytes, 0, bytes.length);
315      }
316      
317      /**
318       * Decode a specific range of bytes of the given byte array to a string
319       * using UTF8.
320       * 
321       * @param bytes The bytes to be decoded into characters
322       * @param offset The index of the first byte to decode
323       * @param length The number of bytes to decode
324       * @return The decoded string
325       */
326      public static String bytes2String(byte[] bytes, int offset, int length) {
327        try {
328          return new String(bytes, offset, length, "UTF8");
329        } catch(UnsupportedEncodingException e) {
330          assert false : "UTF8 encoding is not supported ";
331        }
332        return null;
333      }
334    
335      /**
336       * Converts a string to a byte array using UTF8 encoding.
337       */
338      public static byte[] string2Bytes(String str) {
339        return str.getBytes(Charsets.UTF_8);
340      }
341    
342      /**
   * Given a list of path components, returns a path as a UTF-8 String.
344       */
345      public static String byteArray2PathString(byte[][] pathComponents) {
346        if (pathComponents.length == 0) {
347          return "";
348        } else if (pathComponents.length == 1
349            && (pathComponents[0] == null || pathComponents[0].length == 0)) {
350          return Path.SEPARATOR;
351        }
352        StringBuilder result = new StringBuilder();
353        for (int i = 0; i < pathComponents.length; i++) {
354          result.append(new String(pathComponents[i], Charsets.UTF_8));
355          if (i < pathComponents.length - 1) {
356            result.append(Path.SEPARATOR_CHAR);
357          }
358        }
359        return result.toString();
360      }
361    
362      /**
363       * Converts a list of path components into a path using Path.SEPARATOR.
364       * 
365       * @param components Path components
366       * @return Combined path as a UTF-8 string
367       */
368      public static String strings2PathString(String[] components) {
369        if (components.length == 0) {
370          return "";
371        }
372        if (components.length == 1) {
373          if (components[0] == null || components[0].isEmpty()) {
374            return Path.SEPARATOR;
375          }
376        }
377        return Joiner.on(Path.SEPARATOR).join(components);
378      }
379    
380      /**
   * Given a list of path components, returns a byte array.
382       */
383      public static byte[] byteArray2bytes(byte[][] pathComponents) {
384        if (pathComponents.length == 0) {
385          return EMPTY_BYTES;
386        } else if (pathComponents.length == 1
387            && (pathComponents[0] == null || pathComponents[0].length == 0)) {
388          return new byte[]{(byte) Path.SEPARATOR_CHAR};
389        }
390        int length = 0;
391        for (int i = 0; i < pathComponents.length; i++) {
392          length += pathComponents[i].length;
393          if (i < pathComponents.length - 1) {
394            length++; // for SEPARATOR
395          }
396        }
397        byte[] path = new byte[length];
398        int index = 0;
399        for (int i = 0; i < pathComponents.length; i++) {
400          System.arraycopy(pathComponents[i], 0, path, index,
401              pathComponents[i].length);
402          index += pathComponents[i].length;
403          if (i < pathComponents.length - 1) {
404            path[index] = (byte) Path.SEPARATOR_CHAR;
405            index++;
406          }
407        }
408        return path;
409      }
410    
411      /** Convert an object representing a path to a string. */
412      public static String path2String(final Object path) {
413        return path == null? null
414            : path instanceof String? (String)path
415            : path instanceof byte[][]? byteArray2PathString((byte[][])path)
416            : path.toString();
417      }
418    
419      /**
   * Splits an array of bytes into an array of byte arrays on the given
   * separator byte.
422       * @param bytes the array of bytes to split
423       * @param separator the delimiting byte
424       */
425      public static byte[][] bytes2byteArray(byte[] bytes, byte separator) {
426        return bytes2byteArray(bytes, bytes.length, separator);
427      }
428    
429      /**
   * Splits the first len bytes of the given array into an array of byte
   * arrays on the given separator byte.
432       * @param bytes the byte array to split
433       * @param len the number of bytes to split
434       * @param separator the delimiting byte
435       */
436      public static byte[][] bytes2byteArray(byte[] bytes,
437                                             int len,
438                                             byte separator) {
439        assert len <= bytes.length;
440        int splits = 0;
441        if (len == 0) {
442          return new byte[][]{null};
443        }
444        // Count the splits. Omit multiple separators and the last one
445        for (int i = 0; i < len; i++) {
446          if (bytes[i] == separator) {
447            splits++;
448          }
449        }
450        int last = len - 1;
451        while (last > -1 && bytes[last--] == separator) {
452          splits--;
453        }
454        if (splits == 0 && bytes[0] == separator) {
455          return new byte[][]{null};
456        }
457        splits++;
458        byte[][] result = new byte[splits][];
459        int startIndex = 0;
460        int nextIndex = 0;
461        int index = 0;
462        // Build the splits
463        while (index < splits) {
464          while (nextIndex < len && bytes[nextIndex] != separator) {
465            nextIndex++;
466          }
467          result[index] = new byte[nextIndex - startIndex];
468          System.arraycopy(bytes, startIndex, result[index], 0, nextIndex
469                  - startIndex);
470          index++;
471          startIndex = nextIndex + 1;
472          nextIndex = startIndex;
473        }
474        return result;
475      }
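  // Illustrative round trip (a sketch, not part of the original source):
  // splitting a UTF-8 path on '/' yields a leading empty component for the
  // root, and byteArray2PathString reassembles the same path.
  //
  //   byte[][] comps = DFSUtil.bytes2byteArray(
  //       DFSUtil.string2Bytes("/foo/bar"), (byte) '/');
  //   // comps holds { "", "foo", "bar" } as UTF-8 bytes
  //   String path = DFSUtil.byteArray2PathString(comps);   // "/foo/bar"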
476      
477      /**
478       * Convert a LocatedBlocks to BlockLocations[]
479       * @param blocks a LocatedBlocks
480       * @return an array of BlockLocations
481       */
482      public static BlockLocation[] locatedBlocks2Locations(LocatedBlocks blocks) {
483        if (blocks == null) {
484          return new BlockLocation[0];
485        }
486        return locatedBlocks2Locations(blocks.getLocatedBlocks());
487      }
488      
489      /**
490       * Convert a List<LocatedBlock> to BlockLocation[]
491       * @param blocks A List<LocatedBlock> to be converted
492       * @return converted array of BlockLocation
493       */
494      public static BlockLocation[] locatedBlocks2Locations(List<LocatedBlock> blocks) {
495        if (blocks == null) {
496          return new BlockLocation[0];
497        }
498        int nrBlocks = blocks.size();
499        BlockLocation[] blkLocations = new BlockLocation[nrBlocks];
500        if (nrBlocks == 0) {
501          return blkLocations;
502        }
503        int idx = 0;
504        for (LocatedBlock blk : blocks) {
505          assert idx < nrBlocks : "Incorrect index";
506          DatanodeInfo[] locations = blk.getLocations();
507          String[] hosts = new String[locations.length];
508          String[] xferAddrs = new String[locations.length];
509          String[] racks = new String[locations.length];
510          for (int hCnt = 0; hCnt < locations.length; hCnt++) {
511            hosts[hCnt] = locations[hCnt].getHostName();
512            xferAddrs[hCnt] = locations[hCnt].getXferAddr();
513            NodeBase node = new NodeBase(xferAddrs[hCnt], 
514                                         locations[hCnt].getNetworkLocation());
515            racks[hCnt] = node.toString();
516          }
517          DatanodeInfo[] cachedLocations = blk.getCachedLocations();
518          String[] cachedHosts = new String[cachedLocations.length];
519          for (int i=0; i<cachedLocations.length; i++) {
520            cachedHosts[i] = cachedLocations[i].getHostName();
521          }
522          blkLocations[idx] = new BlockLocation(xferAddrs, hosts, cachedHosts,
523                                                racks,
524                                                blk.getStartOffset(),
525                                                blk.getBlockSize(),
526                                                blk.isCorrupt());
527          idx++;
528        }
529        return blkLocations;
530      }
531    
532      /**
533       * Returns collection of nameservice Ids from the configuration.
534       * @param conf configuration
535       * @return collection of nameservice Ids, or null if not specified
536       */
537      public static Collection<String> getNameServiceIds(Configuration conf) {
538        return conf.getTrimmedStringCollection(DFS_NAMESERVICES);
539      }
540    
541      /**
542       * @return <code>coll</code> if it is non-null and non-empty. Otherwise,
543       * returns a list with a single null value.
544       */
545      private static Collection<String> emptyAsSingletonNull(Collection<String> coll) {
546        if (coll == null || coll.isEmpty()) {
547          return Collections.singletonList(null);
548        } else {
549          return coll;
550        }
551      }
552      
553      /**
   * Namenode high availability related configuration.
   * Returns the collection of namenode Ids from the configuration, one logical
   * id for each namenode in the HA setup.
557       * 
558       * @param conf configuration
559       * @param nsId the nameservice ID to look at, or null for non-federated 
560       * @return collection of namenode Ids
561       */
562      public static Collection<String> getNameNodeIds(Configuration conf, String nsId) {
563        String key = addSuffix(DFS_HA_NAMENODES_KEY_PREFIX, nsId);
564        return conf.getTrimmedStringCollection(key);
565      }
566      
567      /**
568       * Given a list of keys in the order of preference, returns a value
569       * for the key in the given order from the configuration.
570       * @param defaultValue default value to return, when key was not found
571       * @param keySuffix suffix to add to the key, if it is not null
572       * @param conf Configuration
573       * @param keys list of keys in the order of preference
574       * @return value of the key or default if a key was not found in configuration
575       */
576      private static String getConfValue(String defaultValue, String keySuffix,
577          Configuration conf, String... keys) {
578        String value = null;
579        for (String key : keys) {
580          key = addSuffix(key, keySuffix);
581          value = conf.get(key);
582          if (value != null) {
583            break;
584          }
585        }
586        if (value == null) {
587          value = defaultValue;
588        }
589        return value;
590      }
591      
  /** Add a non-empty and non-null suffix to a key. */
593      private static String addSuffix(String key, String suffix) {
594        if (suffix == null || suffix.isEmpty()) {
595          return key;
596        }
597        assert !suffix.startsWith(".") :
598          "suffix '" + suffix + "' should not already have '.' prepended.";
599        return key + "." + suffix;
600      }
601      
602      /** Concatenate list of suffix strings '.' separated */
603      private static String concatSuffixes(String... suffixes) {
604        if (suffixes == null) {
605          return null;
606        }
607        return Joiner.on(".").skipNulls().join(suffixes);
608      }
609      
610      /**
611       * Return configuration key of format key.suffix1.suffix2...suffixN
612       */
613      public static String addKeySuffixes(String key, String... suffixes) {
614        String keySuffix = concatSuffixes(suffixes);
615        return addSuffix(key, keySuffix);
616      }
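  // Illustrative example (a sketch, not part of the original source): the
  // suffixes are joined with "." and appended to the base key; "ns1" and "nn1"
  // are assumed IDs.
  //
  //   DFSUtil.addKeySuffixes(DFS_NAMENODE_RPC_ADDRESS_KEY, "ns1", "nn1")
  //   // => "dfs.namenode.rpc-address.ns1.nn1"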
617    
618      /**
   * Returns the configured addresses for all NameNodes in the cluster.
620       * @param conf configuration
621       * @param defaultAddress default address to return in case key is not found.
622       * @param keys Set of keys to look for in the order of preference
623       * @return a map(nameserviceId to map(namenodeId to InetSocketAddress))
624       */
625      private static Map<String, Map<String, InetSocketAddress>>
626        getAddresses(Configuration conf, String defaultAddress, String... keys) {
627        Collection<String> nameserviceIds = getNameServiceIds(conf);
628        return getAddressesForNsIds(conf, nameserviceIds, defaultAddress, keys);
629      }
630    
631      /**
   * Returns the configured addresses for the NameNodes in the given
   * nameservices.
   * @param conf configuration
   * @param nsIds collection of nameservice Ids to consider
   * @param defaultAddress default address to return in case key is not found.
   * @param keys Set of keys to look for in the order of preference
   * @return a map(nameserviceId to map(namenodeId to InetSocketAddress))
637       */
638      private static Map<String, Map<String, InetSocketAddress>>
639        getAddressesForNsIds(Configuration conf, Collection<String> nsIds,
640                             String defaultAddress, String... keys) {
641        // Look for configurations of the form <key>[.<nameserviceId>][.<namenodeId>]
642        // across all of the configured nameservices and namenodes.
643        Map<String, Map<String, InetSocketAddress>> ret = Maps.newLinkedHashMap();
644        for (String nsId : emptyAsSingletonNull(nsIds)) {
645          Map<String, InetSocketAddress> isas =
646            getAddressesForNameserviceId(conf, nsId, defaultAddress, keys);
647          if (!isas.isEmpty()) {
648            ret.put(nsId, isas);
649          }
650        }
651        return ret;
652      }
653      
654      /**
655       * Get all of the RPC addresses of the individual NNs in a given nameservice.
656       * 
657       * @param conf Configuration
658       * @param nsId the nameservice whose NNs addresses we want.
659       * @param defaultValue default address to return in case key is not found.
660       * @return A map from nnId -> RPC address of each NN in the nameservice.
661       */
662      public static Map<String, InetSocketAddress> getRpcAddressesForNameserviceId(
663          Configuration conf, String nsId, String defaultValue) {
664        return getAddressesForNameserviceId(conf, nsId, defaultValue,
665            DFS_NAMENODE_RPC_ADDRESS_KEY);
666      }
667    
668      private static Map<String, InetSocketAddress> getAddressesForNameserviceId(
669          Configuration conf, String nsId, String defaultValue,
670          String... keys) {
671        Collection<String> nnIds = getNameNodeIds(conf, nsId);
672        Map<String, InetSocketAddress> ret = Maps.newHashMap();
673        for (String nnId : emptyAsSingletonNull(nnIds)) {
674          String suffix = concatSuffixes(nsId, nnId);
675          String address = getConfValue(defaultValue, suffix, conf, keys);
676          if (address != null) {
677            InetSocketAddress isa = NetUtils.createSocketAddr(address);
678            if (isa.isUnresolved()) {
679              LOG.warn("Namenode for " + nsId +
680                       " remains unresolved for ID " + nnId +
681                       ".  Check your hdfs-site.xml file to " +
682                       "ensure namenodes are configured properly.");
683            }
684            ret.put(nnId, isa);
685          }
686        }
687        return ret;
688      }
689    
690      /**
691       * @return a collection of all configured NN Kerberos principals.
692       */
693      public static Set<String> getAllNnPrincipals(Configuration conf) throws IOException {
694        Set<String> principals = new HashSet<String>();
695        for (String nsId : DFSUtil.getNameServiceIds(conf)) {
696          if (HAUtil.isHAEnabled(conf, nsId)) {
697            for (String nnId : DFSUtil.getNameNodeIds(conf, nsId)) {
698              Configuration confForNn = new Configuration(conf);
699              NameNode.initializeGenericKeys(confForNn, nsId, nnId);
700              String principal = SecurityUtil.getServerPrincipal(confForNn
701                  .get(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY),
702                  NameNode.getAddress(confForNn).getHostName());
703              principals.add(principal);
704            }
705          } else {
706            Configuration confForNn = new Configuration(conf);
707            NameNode.initializeGenericKeys(confForNn, nsId, null);
708            String principal = SecurityUtil.getServerPrincipal(confForNn
709                .get(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY),
710                NameNode.getAddress(confForNn).getHostName());
711            principals.add(principal);
712          }
713        }
714    
715        return principals;
716      }
717    
718      /**
   * Returns the HA NN RPC addresses from the configuration, keyed by
   * nameservice Id and namenode Id.
   * 
   * @param conf configuration
   * @return map(nameserviceId to map(namenodeId to InetSocketAddress))
724       */
725      public static Map<String, Map<String, InetSocketAddress>> getHaNnRpcAddresses(
726          Configuration conf) {
727        return getAddresses(conf, null, DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY);
728      }
729    
730      /**
   * Returns the HA NN HTTP addresses from the configuration, keyed by
   * nameservice Id and namenode Id.
   *
   * @return map(nameserviceId to map(namenodeId to InetSocketAddress))
735       */
736      public static Map<String, Map<String, InetSocketAddress>> getHaNnWebHdfsAddresses(
737          Configuration conf, String scheme) {
738        if (WebHdfsFileSystem.SCHEME.equals(scheme)) {
739          return getAddresses(conf, null,
740              DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);
741        } else if (SWebHdfsFileSystem.SCHEME.equals(scheme)) {
742          return getAddresses(conf, null,
743              DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY);
744        } else {
745          throw new IllegalArgumentException("Unsupported scheme: " + scheme);
746        }
747      }
748    
749      /**
   * Returns the backup node RPC addresses from the configuration, keyed by
   * nameservice Id and namenode Id.
   * 
   * @param conf configuration
   * @return map(nameserviceId to map(namenodeId to InetSocketAddress))
755       * @throws IOException on error
756       */
757      public static Map<String, Map<String, InetSocketAddress>> getBackupNodeAddresses(
758          Configuration conf) throws IOException {
759        Map<String, Map<String, InetSocketAddress>> addressList = getAddresses(conf,
760            null, DFS_NAMENODE_BACKUP_ADDRESS_KEY);
761        if (addressList.isEmpty()) {
762          throw new IOException("Incorrect configuration: backup node address "
763              + DFS_NAMENODE_BACKUP_ADDRESS_KEY + " is not configured.");
764        }
765        return addressList;
766      }
767    
768      /**
   * Returns the secondary namenode HTTP addresses from the configuration,
   * keyed by nameservice Id and namenode Id.
   * 
   * @param conf configuration
   * @return map(nameserviceId to map(namenodeId to InetSocketAddress))
774       * @throws IOException on error
775       */
776      public static Map<String, Map<String, InetSocketAddress>> getSecondaryNameNodeAddresses(
777          Configuration conf) throws IOException {
778        Map<String, Map<String, InetSocketAddress>> addressList = getAddresses(conf, null,
779            DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY);
780        if (addressList.isEmpty()) {
781          throw new IOException("Incorrect configuration: secondary namenode address "
782              + DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY + " is not configured.");
783        }
784        return addressList;
785      }
786    
787      /**
   * Returns the namenode RPC addresses from the configuration, keyed by
   * nameservice Id and namenode Id.
   * 
   * Returns the namenode address specifically configured for datanodes (using
   * service ports), if found. If not, the regular RPC address configured for
   * other clients is returned.
   * 
   * @param conf configuration
   * @return map(nameserviceId to map(namenodeId to InetSocketAddress))
797       * @throws IOException on error
798       */
799      public static Map<String, Map<String, InetSocketAddress>> getNNServiceRpcAddresses(
800          Configuration conf) throws IOException {
801        // Use default address as fall back
802        String defaultAddress;
803        try {
804          defaultAddress = NetUtils.getHostPortString(NameNode.getAddress(conf));
805        } catch (IllegalArgumentException e) {
806          defaultAddress = null;
807        }
808        
809        Map<String, Map<String, InetSocketAddress>> addressList =
810          getAddresses(conf, defaultAddress,
811            DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, DFS_NAMENODE_RPC_ADDRESS_KEY);
812        if (addressList.isEmpty()) {
813          throw new IOException("Incorrect configuration: namenode address "
814              + DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY + " or "  
815              + DFS_NAMENODE_RPC_ADDRESS_KEY
816              + " is not configured.");
817        }
818        return addressList;
819      }
820    
821      /**
   * Returns the RPC addresses of the namenodes that manage this cluster, keyed
   * by nameservice Id and namenode Id. Note this is to be used by datanodes to
   * get the list of namenode addresses to talk to.
   *
   * Returns the namenode address specifically configured for datanodes (using
   * service ports), if found. If not, the regular RPC address configured for
   * other clients is returned.
   *
   * @param conf configuration
   * @return map(nameserviceId to map(namenodeId to InetSocketAddress))
832       * @throws IOException on error
833       */
834      public static Map<String, Map<String, InetSocketAddress>>
835        getNNServiceRpcAddressesForCluster(Configuration conf) throws IOException {
836        // Use default address as fall back
837        String defaultAddress;
838        try {
839          defaultAddress = NetUtils.getHostPortString(NameNode.getAddress(conf));
840        } catch (IllegalArgumentException e) {
841          defaultAddress = null;
842        }
843    
844        Collection<String> parentNameServices = conf.getTrimmedStringCollection
845                (DFSConfigKeys.DFS_INTERNAL_NAMESERVICES_KEY);
846    
847        if (parentNameServices.isEmpty()) {
848          parentNameServices = conf.getTrimmedStringCollection
849                  (DFSConfigKeys.DFS_NAMESERVICES);
850        } else {
      // Ensure that the internal service is indeed in the list of all available
      // nameservices.
853          Set<String> availableNameServices = Sets.newHashSet(conf
854                  .getTrimmedStringCollection(DFSConfigKeys.DFS_NAMESERVICES));
855          for (String nsId : parentNameServices) {
856            if (!availableNameServices.contains(nsId)) {
857              throw new IOException("Unknown nameservice: " + nsId);
858            }
859          }
860        }
861    
862        Map<String, Map<String, InetSocketAddress>> addressList =
863                getAddressesForNsIds(conf, parentNameServices, defaultAddress,
864                        DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, DFS_NAMENODE_RPC_ADDRESS_KEY);
865        if (addressList.isEmpty()) {
866          throw new IOException("Incorrect configuration: namenode address "
867                  + DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY + " or "
868                  + DFS_NAMENODE_RPC_ADDRESS_KEY
869                  + " is not configured.");
870        }
871        return addressList;
872      }
873    
874      /**
875       * Flatten the given map, as returned by other functions in this class,
876       * into a flat list of {@link ConfiguredNNAddress} instances.
877       */
878      public static List<ConfiguredNNAddress> flattenAddressMap(
879          Map<String, Map<String, InetSocketAddress>> map) {
880        List<ConfiguredNNAddress> ret = Lists.newArrayList();
881        
882        for (Map.Entry<String, Map<String, InetSocketAddress>> entry :
883          map.entrySet()) {
884          String nsId = entry.getKey();
885          Map<String, InetSocketAddress> nnMap = entry.getValue();
886          for (Map.Entry<String, InetSocketAddress> e2 : nnMap.entrySet()) {
887            String nnId = e2.getKey();
888            InetSocketAddress addr = e2.getValue();
889            
890            ret.add(new ConfiguredNNAddress(nsId, nnId, addr));
891          }
892        }
893        return ret;
894      }
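  // Illustrative usage (a sketch, not part of the original source): the nested
  // map returned by getHaNnRpcAddresses can be flattened for simple iteration.
  //
  //   Map<String, Map<String, InetSocketAddress>> map =
  //       DFSUtil.getHaNnRpcAddresses(conf);
  //   for (ConfiguredNNAddress cnn : DFSUtil.flattenAddressMap(map)) {
  //     LOG.info(cnn.getNameserviceId() + "/" + cnn.getNamenodeId()
  //         + " => " + cnn.getAddress());
  //   }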
895    
896      /**
897       * Format the given map, as returned by other functions in this class,
898       * into a string suitable for debugging display. The format of this string
899       * should not be considered an interface, and is liable to change.
900       */
901      public static String addressMapToString(
902          Map<String, Map<String, InetSocketAddress>> map) {
903        StringBuilder b = new StringBuilder();
904        for (Map.Entry<String, Map<String, InetSocketAddress>> entry :
905             map.entrySet()) {
906          String nsId = entry.getKey();
907          Map<String, InetSocketAddress> nnMap = entry.getValue();
908          b.append("Nameservice <").append(nsId).append(">:").append("\n");
909          for (Map.Entry<String, InetSocketAddress> e2 : nnMap.entrySet()) {
910            b.append("  NN ID ").append(e2.getKey())
911              .append(" => ").append(e2.getValue()).append("\n");
912          }
913        }
914        return b.toString();
915      }
916      
917      public static String nnAddressesAsString(Configuration conf) {
918        Map<String, Map<String, InetSocketAddress>> addresses =
919          getHaNnRpcAddresses(conf);
920        return addressMapToString(addresses);
921      }
922    
923      /**
924       * Represent one of the NameNodes configured in the cluster.
925       */
926      public static class ConfiguredNNAddress {
927        private final String nameserviceId;
928        private final String namenodeId;
929        private final InetSocketAddress addr;
930    
931        private ConfiguredNNAddress(String nameserviceId, String namenodeId,
932            InetSocketAddress addr) {
933          this.nameserviceId = nameserviceId;
934          this.namenodeId = namenodeId;
935          this.addr = addr;
936        }
937    
938        public String getNameserviceId() {
939          return nameserviceId;
940        }
941    
942        public String getNamenodeId() {
943          return namenodeId;
944        }
945    
946        public InetSocketAddress getAddress() {
947          return addr;
948        }
949        
950        @Override
951        public String toString() {
952          return "ConfiguredNNAddress[nsId=" + nameserviceId + ";" +
953            "nnId=" + namenodeId + ";addr=" + addr + "]";
954        }
955      }
956      
957      /**
958       * Get a URI for each configured nameservice. If a nameservice is
959       * HA-enabled, then the logical URI of the nameservice is returned. If the
960       * nameservice is not HA-enabled, then a URI corresponding to an RPC address
961       * of the single NN for that nameservice is returned, preferring the service
962       * RPC address over the client RPC address.
963       * 
964       * @param conf configuration
965       * @return a collection of all configured NN URIs, preferring service
966       *         addresses
967       */
968      public static Collection<URI> getNsServiceRpcUris(Configuration conf) {
969        return getNameServiceUris(conf,
970            DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
971            DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY);
972      }
973    
974      /**
975       * Get a URI for each configured nameservice. If a nameservice is
976       * HA-enabled, then the logical URI of the nameservice is returned. If the
977       * nameservice is not HA-enabled, then a URI corresponding to the address of
978       * the single NN for that nameservice is returned.
979       * 
980       * @param conf configuration
981       * @param keys configuration keys to try in order to get the URI for non-HA
982       *        nameservices
983       * @return a collection of all configured NN URIs
984       */
985      public static Collection<URI> getNameServiceUris(Configuration conf,
986          String... keys) {
987        Set<URI> ret = new HashSet<URI>();
988        
989        // We're passed multiple possible configuration keys for any given NN or HA
990        // nameservice, and search the config in order of these keys. In order to
991        // make sure that a later config lookup (e.g. fs.defaultFS) doesn't add a
992        // URI for a config key for which we've already found a preferred entry, we
993        // keep track of non-preferred keys here.
994        Set<URI> nonPreferredUris = new HashSet<URI>();
995        
996        for (String nsId : getNameServiceIds(conf)) {
997          if (HAUtil.isHAEnabled(conf, nsId)) {
998            // Add the logical URI of the nameservice.
999            try {
1000              ret.add(new URI(HdfsConstants.HDFS_URI_SCHEME + "://" + nsId));
1001            } catch (URISyntaxException ue) {
1002              throw new IllegalArgumentException(ue);
1003            }
1004          } else {
1005            // Add the URI corresponding to the address of the NN.
1006            boolean uriFound = false;
1007            for (String key : keys) {
1008              String addr = conf.get(concatSuffixes(key, nsId));
1009              if (addr != null) {
1010                URI uri = createUri(HdfsConstants.HDFS_URI_SCHEME,
1011                    NetUtils.createSocketAddr(addr));
1012                if (!uriFound) {
1013                  uriFound = true;
1014                  ret.add(uri);
1015                } else {
1016                  nonPreferredUris.add(uri);
1017                }
1018              }
1019            }
1020          }
1021        }
1022        
1023        // Add the generic configuration keys.
1024        boolean uriFound = false;
1025        for (String key : keys) {
1026          String addr = conf.get(key);
1027          if (addr != null) {
1028            URI uri = createUri("hdfs", NetUtils.createSocketAddr(addr));
1029            if (!uriFound) {
1030              uriFound = true;
1031              ret.add(uri);
1032            } else {
1033              nonPreferredUris.add(uri);
1034            }
1035          }
1036        }
1037        
1038        // Add the default URI if it is an HDFS URI.
1039        URI defaultUri = FileSystem.getDefaultUri(conf);
    // If defaultUri is in ip:port format, convert it to hostname:port format.
1042        if (defaultUri != null && (defaultUri.getPort() != -1)) {
1043          defaultUri = createUri(defaultUri.getScheme(),
1044              NetUtils.createSocketAddr(defaultUri.getHost(), 
1045                  defaultUri.getPort()));
1046        }
1047        if (defaultUri != null &&
1048            HdfsConstants.HDFS_URI_SCHEME.equals(defaultUri.getScheme()) &&
1049            !nonPreferredUris.contains(defaultUri)) {
1050          ret.add(defaultUri);
1051        }
1052        
1053        return ret;
1054      }
1055    
1056      /**
1057       * Given the InetSocketAddress this method returns the nameservice Id
1058       * corresponding to the key with matching address, by doing a reverse 
1059       * lookup on the list of nameservices until it finds a match.
1060       * 
1061       * Since the process of resolving URIs to Addresses is slightly expensive,
1062       * this utility method should not be used in performance-critical routines.
1063       * 
1064       * @param conf - configuration
1065       * @param address - InetSocketAddress for configured communication with NN.
1066       *     Configured addresses are typically given as URIs, but we may have to
1067       *     compare against a URI typed in by a human, or the server name may be
1068       *     aliased, so we compare unambiguous InetSocketAddresses instead of just
1069       *     comparing URI substrings.
1070       * @param keys - list of configured communication parameters that should
1071       *     be checked for matches.  For example, to compare against RPC addresses,
1072       *     provide the list DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
1073       *     DFS_NAMENODE_RPC_ADDRESS_KEY.  Use the generic parameter keys,
1074       *     not the NameServiceId-suffixed keys.
1075       * @return nameserviceId, or null if no match found
1076       */
1077      public static String getNameServiceIdFromAddress(final Configuration conf, 
1078          final InetSocketAddress address, String... keys) {
1079        // Configuration with a single namenode and no nameserviceId
1080        String[] ids = getSuffixIDs(conf, address, keys);
1081        return (ids != null) ? ids[0] : null;
1082      }
1083      
1084      /**
   * Return the server http or https address from the configuration for a
   * given namenode rpc address.
1087       * @param namenodeAddr - namenode RPC address
1088       * @param conf configuration
1089       * @param scheme - the scheme (http / https)
1090       * @return server http or https address
1091       * @throws IOException 
1092       */
1093      public static URI getInfoServer(InetSocketAddress namenodeAddr,
1094          Configuration conf, String scheme) throws IOException {
1095        String[] suffixes = null;
1096        if (namenodeAddr != null) {
1097          // if non-default namenode, try reverse look up 
1098          // the nameServiceID if it is available
1099          suffixes = getSuffixIDs(conf, namenodeAddr,
1100              DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
1101              DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY);
1102        }
1103    
1104        String authority;
1105        if ("http".equals(scheme)) {
1106          authority = getSuffixedConf(conf, DFS_NAMENODE_HTTP_ADDRESS_KEY,
1107              DFS_NAMENODE_HTTP_ADDRESS_DEFAULT, suffixes);
1108        } else if ("https".equals(scheme)) {
1109          authority = getSuffixedConf(conf, DFS_NAMENODE_HTTPS_ADDRESS_KEY,
1110              DFS_NAMENODE_HTTPS_ADDRESS_DEFAULT, suffixes);
1111        } else {
1112          throw new IllegalArgumentException("Invalid scheme:" + scheme);
1113        }
1114    
1115        if (namenodeAddr != null) {
1116          authority = substituteForWildcardAddress(authority,
1117              namenodeAddr.getHostName());
1118        }
1119        return URI.create(scheme + "://" + authority);
1120      }
1121    
1122      /**
   * Look up the HTTP / HTTPS address of the namenode, and replace its hostname
   * with defaultHost when the address is found to be a wildcard / local
   * address.
1126       *
1127       * @param defaultHost
1128       *          The default host name of the namenode.
1129       * @param conf
1130       *          The configuration
1131       * @param scheme
1132       *          HTTP or HTTPS
1133       * @throws IOException
1134       */
1135      public static URI getInfoServerWithDefaultHost(String defaultHost,
1136          Configuration conf, final String scheme) throws IOException {
1137        URI configuredAddr = getInfoServer(null, conf, scheme);
1138        String authority = substituteForWildcardAddress(
1139            configuredAddr.getAuthority(), defaultHost);
1140        return URI.create(scheme + "://" + authority);
1141      }
1142    
1143      /**
1144       * Determine whether HTTP or HTTPS should be used to connect to the remote
1145       * server. Currently the client only connects to the server via HTTPS if the
1146       * policy is set to HTTPS_ONLY.
1147       *
1148       * @return the scheme (HTTP / HTTPS)
1149       */
1150      public static String getHttpClientScheme(Configuration conf) {
1151        HttpConfig.Policy policy = DFSUtil.getHttpPolicy(conf);
1152        return policy == HttpConfig.Policy.HTTPS_ONLY ? "https" : "http";
1153      }
1154    
1155      /**
1156       * Substitute a default host in the case that an address has been configured
1157       * with a wildcard. This is used, for example, when determining the HTTP
1158       * address of the NN -- if it's configured to bind to 0.0.0.0, we want to
1159       * substitute the hostname from the filesystem URI rather than trying to
1160       * connect to 0.0.0.0.
1161       * @param configuredAddress the address found in the configuration
1162       * @param defaultHost the host to substitute with, if configuredAddress
1163       * is a local/wildcard address.
1164       * @return the substituted address
1165       * @throws IOException if it is a wildcard address and security is enabled
1166       */
1167      @VisibleForTesting
1168      static String substituteForWildcardAddress(String configuredAddress,
1169        String defaultHost) throws IOException {
1170        InetSocketAddress sockAddr = NetUtils.createSocketAddr(configuredAddress);
1171        InetSocketAddress defaultSockAddr = NetUtils.createSocketAddr(defaultHost
1172            + ":0");
1173        final InetAddress addr = sockAddr.getAddress();
1174        if (addr != null && addr.isAnyLocalAddress()) {
1175          if (UserGroupInformation.isSecurityEnabled() &&
1176              defaultSockAddr.getAddress().isAnyLocalAddress()) {
1177            throw new IOException("Cannot use a wildcard address with security. " +
1178                "Must explicitly set bind address for Kerberos");
1179          }
1180          return defaultHost + ":" + sockAddr.getPort();
1181        } else {
1182          return configuredAddress;
1183        }
1184      }
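  // Illustrative behavior (a sketch, not part of the original source, assuming
  // security is disabled; the host names are assumed examples):
  //
  //   substituteForWildcardAddress("0.0.0.0:50070", "nn1.example.com")
  //   // => "nn1.example.com:50070"
  //   substituteForWildcardAddress("nn2.example.com:50070", "nn1.example.com")
  //   // => "nn2.example.com:50070" (unchanged, not a wildcard address)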
1185      
1186      private static String getSuffixedConf(Configuration conf,
1187          String key, String defaultVal, String[] suffixes) {
1188        String ret = conf.get(DFSUtil.addKeySuffixes(key, suffixes));
1189        if (ret != null) {
1190          return ret;
1191        }
1192        return conf.get(key, defaultVal);
1193      }
1194      
1195      /**
1196       * Sets the node specific setting into generic configuration key. Looks up
1197       * value of "key.nameserviceId.namenodeId" and if found sets that value into 
1198       * generic key in the conf. If this is not found, falls back to
1199       * "key.nameserviceId" and then the unmodified key.
1200       *
1201       * Note that this only modifies the runtime conf.
1202       * 
1203       * @param conf
1204       *          Configuration object to lookup specific key and to set the value
1205       *          to the key passed. Note the conf object is modified.
1206       * @param nameserviceId
1207       *          nameservice Id to construct the node specific key. Pass null if
   *          federation is not configured.
1209       * @param nnId
1210       *          namenode Id to construct the node specific key. Pass null if
1211       *          HA is not configured.
1212       * @param keys
   *          The keys for which node specific values are looked up
1214       */
1215      public static void setGenericConf(Configuration conf,
1216          String nameserviceId, String nnId, String... keys) {
1217        for (String key : keys) {
1218          String value = conf.get(addKeySuffixes(key, nameserviceId, nnId));
1219          if (value != null) {
1220            conf.set(key, value);
1221            continue;
1222          }
1223          value = conf.get(addKeySuffixes(key, nameserviceId));
1224          if (value != null) {
1225            conf.set(key, value);
1226          }
1227        }
1228      }
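  // Illustrative effect (a sketch, not part of the original source): with the
  // node specific key set, copying it into the generic key lets later code
  // read the plain key. "ns1", "nn1" and the address are assumed examples.
  //
  //   conf.set("dfs.namenode.rpc-address.ns1.nn1", "nn1.example.com:8020");
  //   DFSUtil.setGenericConf(conf, "ns1", "nn1", DFS_NAMENODE_RPC_ADDRESS_KEY);
  //   conf.get(DFS_NAMENODE_RPC_ADDRESS_KEY);   // "nn1.example.com:8020"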
1229      
1230      /** Return used as percentage of capacity */
1231      public static float getPercentUsed(long used, long capacity) {
1232        return capacity <= 0 ? 100 : (used * 100.0f)/capacity; 
1233      }
1234      
1235      /** Return remaining as percentage of capacity */
1236      public static float getPercentRemaining(long remaining, long capacity) {
1237        return capacity <= 0 ? 0 : (remaining * 100.0f)/capacity; 
1238      }
1239    
1240      /** Convert percentage to a string. */
1241      public static String percent2String(double percentage) {
1242        return StringUtils.format("%.2f%%", percentage);
1243      }
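  // Illustrative values (a sketch, not part of the original source):
  //
  //   DFSUtil.getPercentUsed(50L, 200L)        // 25.0f
  //   DFSUtil.getPercentRemaining(150L, 200L)  // 75.0f
  //   DFSUtil.percent2String(25.0)             // "25.00%"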
1244    
1245      /**
1246       * Round bytes to GiB (gibibyte)
1247       * @param bytes number of bytes
1248       * @return number of GiB
1249       */
1250      public static int roundBytesToGB(long bytes) {
1251        return Math.round((float)bytes/ 1024 / 1024 / 1024);
1252      }
1253      
1254      /** Create a {@link ClientDatanodeProtocol} proxy */
1255      public static ClientDatanodeProtocol createClientDatanodeProtocolProxy(
1256          DatanodeID datanodeid, Configuration conf, int socketTimeout,
1257          boolean connectToDnViaHostname, LocatedBlock locatedBlock) throws IOException {
1258        return new ClientDatanodeProtocolTranslatorPB(datanodeid, conf, socketTimeout,
1259            connectToDnViaHostname, locatedBlock);
1260      }
1261      
1262      /** Create {@link ClientDatanodeProtocol} proxy using kerberos ticket */
1263      public static ClientDatanodeProtocol createClientDatanodeProtocolProxy(
1264          DatanodeID datanodeid, Configuration conf, int socketTimeout,
1265          boolean connectToDnViaHostname) throws IOException {
1266        return new ClientDatanodeProtocolTranslatorPB(
1267            datanodeid, conf, socketTimeout, connectToDnViaHostname);
1268      }
1269      
1270      /** Create a {@link ClientDatanodeProtocol} proxy */
1271      public static ClientDatanodeProtocol createClientDatanodeProtocolProxy(
1272          InetSocketAddress addr, UserGroupInformation ticket, Configuration conf,
1273          SocketFactory factory) throws IOException {
1274        return new ClientDatanodeProtocolTranslatorPB(addr, ticket, conf, factory);
1275      }
1276    
1277      /**
1278       * Get nameservice Id for the {@link NameNode} based on namenode RPC address
1279       * matching the local node address.
1280       */
1281      public static String getNamenodeNameServiceId(Configuration conf) {
1282        return getNameServiceId(conf, DFS_NAMENODE_RPC_ADDRESS_KEY);
1283      }
1284      
1285      /**
1286       * Get nameservice Id for the BackupNode based on backup node RPC address
1287       * matching the local node address.
1288       */
1289      public static String getBackupNameServiceId(Configuration conf) {
1290        return getNameServiceId(conf, DFS_NAMENODE_BACKUP_ADDRESS_KEY);
1291      }
1292      
1293      /**
1294       * Get nameservice Id for the secondary node based on secondary http address
1295       * matching the local node address.
1296       */
1297      public static String getSecondaryNameServiceId(Configuration conf) {
1298        return getNameServiceId(conf, DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY);
1299      }
1300      
1301      /**
1302       * Get the nameservice Id by matching the {@code addressKey} with the
1303       * address of the local node.
1304       * 
1305       * If {@link DFSConfigKeys#DFS_NAMESERVICE_ID} is not specifically
1306       * configured, and more than one nameservice Id is configured, this method 
1307       * determines the nameservice Id by matching the local node's address with the
1308       * configured addresses. When a match is found, it returns the nameservice Id
1309       * from the corresponding configuration key.
1310       * 
1311       * @param conf Configuration
1312       * @param addressKey configuration key to get the address.
1313       * @return nameservice Id on success, null if federation is not configured.
1314       * @throws HadoopIllegalArgumentException on error
1315       */
1316      private static String getNameServiceId(Configuration conf, String addressKey) {
1317        String nameserviceId = conf.get(DFS_NAMESERVICE_ID);
1318        if (nameserviceId != null) {
1319          return nameserviceId;
1320        }
1321        Collection<String> nsIds = getNameServiceIds(conf);
1322        if (1 == nsIds.size()) {
1323          return nsIds.toArray(new String[1])[0];
1324        }
1325        String nnId = conf.get(DFS_HA_NAMENODE_ID_KEY);
1326        
1327        return getSuffixIDs(conf, addressKey, null, nnId, LOCAL_ADDRESS_MATCHER)[0];
1328      }
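          // Resolution order of getNameServiceId, sketched with the standard
          // federation/HA keys (the nameservice names and host below are hypothetical):
          //
          //   dfs.nameservice.id = ns1                       -> "ns1" returned directly
          //   dfs.nameservices   = ns1                       -> single nameservice, "ns1"
          //   dfs.nameservices   = ns1,ns2 and
          //   dfs.namenode.rpc-address.ns1.nn1 = host1:8020  -> "ns1" when host1 matches
          //                                                     the local node's address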
1329      
1330      /**
1331       * Returns nameservice Id and namenode Id when the local host matches the
1332       * configuration parameter {@code addressKey.<nameservice Id>.<namenode Id>}
1333       * 
1334       * @param conf Configuration
1335       * @param addressKey configuration key corresponding to the address.
1336       * @param knownNsId only look at configs for the given nameservice, if not null
1337       * @param knownNNId only look at configs for the given namenode, if not null
1338       * @param matcher matching criteria for matching the address
1339       * @return Array with nameservice Id and namenode Id on success. First element
1340       *         in the array is nameservice Id and second element is namenode Id.
1341       *         A null value indicates that the configuration does not have the
1342       *         corresponding Id.
1343       * @throws HadoopIllegalArgumentException on error
1344       */
1345      static String[] getSuffixIDs(final Configuration conf, final String addressKey,
1346          String knownNsId, String knownNNId,
1347          final AddressMatcher matcher) {
1348        String nameserviceId = null;
1349        String namenodeId = null;
1350        int found = 0;
1351        
1352        Collection<String> nsIds = getNameServiceIds(conf);
1353        for (String nsId : emptyAsSingletonNull(nsIds)) {
1354          if (knownNsId != null && !knownNsId.equals(nsId)) {
1355            continue;
1356          }
1357          
1358          Collection<String> nnIds = getNameNodeIds(conf, nsId);
1359          for (String nnId : emptyAsSingletonNull(nnIds)) {
1360            if (LOG.isTraceEnabled()) {
1361              LOG.trace(String.format("addressKey: %s nsId: %s nnId: %s",
1362                  addressKey, nsId, nnId));
1363            }
1364            if (knownNNId != null && !knownNNId.equals(nnId)) {
1365              continue;
1366            }
1367            String key = addKeySuffixes(addressKey, nsId, nnId);
1368            String addr = conf.get(key);
1369            if (addr == null) {
1370              continue;
1371            }
1372            InetSocketAddress s = null;
1373            try {
1374              s = NetUtils.createSocketAddr(addr);
1375            } catch (Exception e) {
1376              LOG.warn("Exception in creating socket address " + addr, e);
1377              continue;
1378            }
1379            if (!s.isUnresolved() && matcher.match(s)) {
1380              nameserviceId = nsId;
1381              namenodeId = nnId;
1382              found++;
1383            }
1384          }
1385        }
1386        if (found > 1) { // At most one configured address may match the local address
1387          String msg = "Configuration has multiple addresses that match "
1388              + "local node's address. Please configure the system with "
1389              + DFS_NAMESERVICE_ID + " and "
1390              + DFS_HA_NAMENODE_ID_KEY;
1391          throw new HadoopIllegalArgumentException(msg);
1392        }
1393        return new String[] { nameserviceId, namenodeId };
1394      }
1395      
1396      /**
1397       * For the given set of {@code keys}, adds the nameservice Id and/or namenode Id
1398       * suffixes and returns {nameserviceId, namenodeId} when an address match is found.
1399       * @see #getSuffixIDs(Configuration, String, String, String, AddressMatcher)
1400       */
1401      static String[] getSuffixIDs(final Configuration conf,
1402          final InetSocketAddress address, final String... keys) {
1403        AddressMatcher matcher = new AddressMatcher() {
1404          @Override
1405          public boolean match(InetSocketAddress s) {
1406            return address.equals(s);
1407          }
1408        };
1409        
1410        for (String key : keys) {
1411          String[] ids = getSuffixIDs(conf, key, null, null, matcher);
1412        if (ids != null && (ids[0] != null || ids[1] != null)) {
1413            return ids;
1414          }
1415        }
1416        return null;
1417      }
1418      
1419      private interface AddressMatcher {
1420        public boolean match(InetSocketAddress s);
1421      }
1422    
1423      /** Create a URI from the scheme and address */
1424      public static URI createUri(String scheme, InetSocketAddress address) {
1425        try {
1426          return new URI(scheme, null, address.getHostName(), address.getPort(),
1427              null, null, null);
1428        } catch (URISyntaxException ue) {
1429          throw new IllegalArgumentException(ue);
1430        }
1431      }
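          // Example (illustrative; the host and port are hypothetical):
          //   createUri("http", new InetSocketAddress("host1.example.com", 50070))
          //     -> URI("http://host1.example.com:50070")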
1432      
1433      /**
1434       * Add protobuf based protocol to the {@link org.apache.hadoop.ipc.RPC.Server}
1435       * @param conf configuration
1436       * @param protocol Protocol interface
1437       * @param service service that implements the protocol
1438       * @param server RPC server to which the protocol and its implementation are added
1439       * @throws IOException
1440       */
1441      public static void addPBProtocol(Configuration conf, Class<?> protocol,
1442          BlockingService service, RPC.Server server) throws IOException {
1443        RPC.setProtocolEngine(conf, protocol, ProtobufRpcEngine.class);
1444        server.addProtocol(RPC.RpcKind.RPC_PROTOCOL_BUFFER, protocol, service);
1445      }
1446    
1447      /**
1448       * Map a logical namenode ID to its service address. Use the given
1449       * nameservice if specified, or the configured one if none is given.
1450       *
1451       * @param conf Configuration
1452       * @param nsId which nameservice nnId is a part of, optional
1453       * @param nnId the namenode ID to get the service addr for
1454       * @return the service addr, null if it could not be determined
1455       */
1456      public static String getNamenodeServiceAddr(final Configuration conf,
1457          String nsId, String nnId) {
1458    
1459        if (nsId == null) {
1460          nsId = getOnlyNameServiceIdOrNull(conf);
1461        }
1462    
1463        String serviceAddrKey = concatSuffixes(
1464            DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, nsId, nnId);
1465    
1466        String addrKey = concatSuffixes(
1467            DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY, nsId, nnId);
1468    
1469        String serviceRpcAddr = conf.get(serviceAddrKey);
1470        if (serviceRpcAddr == null) {
1471          serviceRpcAddr = conf.get(addrKey);
1472        }
1473        return serviceRpcAddr;
1474      }
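          // Lookup order of getNamenodeServiceAddr(conf, "ns1", "nn1"), sketched with the
          // standard HDFS keys (the address value is hypothetical):
          //
          //   1. dfs.namenode.servicerpc-address.ns1.nn1
          //   2. dfs.namenode.rpc-address.ns1.nn1
          //
          //   If only the second key is set, e.g. to "nn1.example.com:8020", that value
          //   is returned; if neither is set, null is returned.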
1475    
1476      /**
1477       * If the configuration refers to only a single nameservice, return the
1478       * name of that nameservice. If it refers to 0 or more than 1, return null.
1479       */
1480      public static String getOnlyNameServiceIdOrNull(Configuration conf) {
1481        Collection<String> nsIds = getNameServiceIds(conf);
1482        if (1 == nsIds.size()) {
1483          return nsIds.toArray(new String[1])[0];
1484        } else {
1485          // No nameservice ID was given and more than one is configured
1486          return null;
1487        }
1488      }
1489      
1490      public static final Options helpOptions = new Options();
1491      public static final Option helpOpt = new Option("h", "help", false,
1492          "get help information");
1493    
1494      static {
1495        helpOptions.addOption(helpOpt);
1496      }
1497    
1498      /**
1499       * Parse the command-line arguments for the help option.
1500       * 
1501       * @param args the arguments to be parsed
1502       * @param helpDescription help information to be printed out
1503       * @param out the output stream to print the help information to
1504       * @param printGenericCommandUsage whether to print the 
1505       *              generic command usage defined in ToolRunner
1506       * @return true when the argument matches help option, false if not
1507       */
1508      public static boolean parseHelpArgument(String[] args,
1509          String helpDescription, PrintStream out, boolean printGenericCommandUsage) {
1510        if (args.length == 1) {
1511          try {
1512            CommandLineParser parser = new PosixParser();
1513            CommandLine cmdLine = parser.parse(helpOptions, args);
1514            if (cmdLine.hasOption(helpOpt.getOpt())
1515                || cmdLine.hasOption(helpOpt.getLongOpt())) {
1516              // should print out the help information
1517              out.println(helpDescription + "\n");
1518              if (printGenericCommandUsage) {
1519                ToolRunner.printGenericCommandUsage(out);
1520              }
1521              return true;
1522            }
1523          } catch (ParseException pe) {
1524            return false;
1525          }
1526        }
1527        return false;
1528      }
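          // Typical usage sketch from a command-line tool's main() (the usage string
          // below is hypothetical):
          //
          //   if (parseHelpArgument(argv, "Usage: mytool [-h|--help]", System.out, true)) {
          //     System.exit(0);
          //   }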
1529      
1530      /**
1531       * Get DFS_NAMENODE_INVALIDATE_WORK_PCT_PER_ITERATION from configuration.
1532       * 
1533       * @param conf Configuration
1534       * @return Value of DFS_NAMENODE_INVALIDATE_WORK_PCT_PER_ITERATION
1535       */
1536      public static float getInvalidateWorkPctPerIteration(Configuration conf) {
1537        float blocksInvalidateWorkPct = conf.getFloat(
1538            DFSConfigKeys.DFS_NAMENODE_INVALIDATE_WORK_PCT_PER_ITERATION,
1539            DFSConfigKeys.DFS_NAMENODE_INVALIDATE_WORK_PCT_PER_ITERATION_DEFAULT);
1540        Preconditions.checkArgument(
1541            (blocksInvalidateWorkPct > 0 && blocksInvalidateWorkPct <= 1.0f),
1542            DFSConfigKeys.DFS_NAMENODE_INVALIDATE_WORK_PCT_PER_ITERATION +
1543            " = '" + blocksInvalidateWorkPct + "' is invalid. " +
1544            "It should be a positive, non-zero float value, not greater than 1.0f, " +
1545            "to indicate a percentage.");
1546        return blocksInvalidateWorkPct;
1547      }
1548    
1549      /**
1550       * Get DFS_NAMENODE_REPLICATION_WORK_MULTIPLIER_PER_ITERATION from
1551       * configuration.
1552       * 
1553       * @param conf Configuration
1554       * @return Value of DFS_NAMENODE_REPLICATION_WORK_MULTIPLIER_PER_ITERATION
1555       */
1556      public static int getReplWorkMultiplier(Configuration conf) {
1557        int blocksReplWorkMultiplier = conf.getInt(
1558                DFSConfigKeys.DFS_NAMENODE_REPLICATION_WORK_MULTIPLIER_PER_ITERATION,
1559                DFSConfigKeys.DFS_NAMENODE_REPLICATION_WORK_MULTIPLIER_PER_ITERATION_DEFAULT);
1560        Preconditions.checkArgument(
1561            (blocksReplWorkMultiplier > 0),
1562            DFSConfigKeys.DFS_NAMENODE_REPLICATION_WORK_MULTIPLIER_PER_ITERATION +
1563            " = '" + blocksReplWorkMultiplier + "' is invalid. " +
1564            "It should be a positive, non-zero integer value.");
1565        return blocksReplWorkMultiplier;
1566      }
1567      
1568      /**
1569       * Get the SPNEGO keytab configuration key.
1570       * 
1571       * @param conf Configuration
1572       * @param defaultKey default key to be used for config lookup
1573       * @return DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY if that key is configured
1574       *         with a non-empty value, otherwise defaultKey
1575       */
1576      public static String getSpnegoKeytabKey(Configuration conf, String defaultKey) {
1577        String value = 
1578            conf.get(DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY);
1579        return (value == null || value.isEmpty()) ?
1580            defaultKey : DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY;
1581      }
1582    
1583      /**
1584       * Get the HTTP policy. The policy is chosen as follows:
1585       * <ol>
1586       * <li>If hadoop.ssl.enabled is set, http endpoints are not started. Only
1587       * https endpoints are started on configured https ports</li>
1588       * <li>This configuration is overridden by the dfs.https.enable configuration, if
1589       * it is set to true. In that case, both http and https endpoints are started.</li>
1590       * <li>All the above configurations are overridden by dfs.http.policy
1591       * configuration. With this configuration you can set http-only, https-only
1592       * and http-and-https endpoints.</li>
1593       * </ol>
1594       * See hdfs-default.xml documentation for more details on each of the above
1595       * configuration settings.
1596       */
1597      public static HttpConfig.Policy getHttpPolicy(Configuration conf) {
1598        String policyStr = conf.get(DFSConfigKeys.DFS_HTTP_POLICY_KEY);
1599        if (policyStr == null) {
1600          boolean https = conf.getBoolean(DFSConfigKeys.DFS_HTTPS_ENABLE_KEY,
1601              DFSConfigKeys.DFS_HTTPS_ENABLE_DEFAULT);
1602    
1603          boolean hadoopSsl = conf.getBoolean(
1604              CommonConfigurationKeys.HADOOP_SSL_ENABLED_KEY,
1605              CommonConfigurationKeys.HADOOP_SSL_ENABLED_DEFAULT);
1606    
1607          if (hadoopSsl) {
1608            LOG.warn(CommonConfigurationKeys.HADOOP_SSL_ENABLED_KEY
1609                + " is deprecated. Please use " + DFSConfigKeys.DFS_HTTP_POLICY_KEY
1610                + ".");
1611          }
1612          if (https) {
1613            LOG.warn(DFSConfigKeys.DFS_HTTPS_ENABLE_KEY
1614                + " is deprecated. Please use " + DFSConfigKeys.DFS_HTTP_POLICY_KEY
1615                + ".");
1616          }
1617    
1618          return (hadoopSsl || https) ? HttpConfig.Policy.HTTP_AND_HTTPS
1619              : HttpConfig.Policy.HTTP_ONLY;
1620        }
1621    
1622        HttpConfig.Policy policy = HttpConfig.Policy.fromString(policyStr);
1623        if (policy == null) {
1624          throw new HadoopIllegalArgumentException("Unrecognized value '"
1625              + policyStr + "' for " + DFSConfigKeys.DFS_HTTP_POLICY_KEY);
1626        }
1627    
1628        conf.set(DFSConfigKeys.DFS_HTTP_POLICY_KEY, policy.name());
1629        return policy;
1630      }
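          // Example (illustrative):
          //   conf.set(DFSConfigKeys.DFS_HTTP_POLICY_KEY, "HTTPS_ONLY");
          //   getHttpPolicy(conf)  // -> HttpConfig.Policy.HTTPS_ONLY (value re-stored
          //                        //    in canonical form)
          // With no policy and neither deprecated key set, HTTP_ONLY is returned.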
1631    
1632      public static HttpServer2.Builder loadSslConfToHttpServerBuilder(HttpServer2.Builder builder,
1633          Configuration sslConf) {
1634        return builder
1635            .needsClientAuth(
1636                sslConf.getBoolean(DFS_CLIENT_HTTPS_NEED_AUTH_KEY,
1637                    DFS_CLIENT_HTTPS_NEED_AUTH_DEFAULT))
1638            .keyPassword(getPassword(sslConf, DFS_SERVER_HTTPS_KEYPASSWORD_KEY))
1639            .keyStore(sslConf.get("ssl.server.keystore.location"),
1640                getPassword(sslConf, DFS_SERVER_HTTPS_KEYSTORE_PASSWORD_KEY),
1641                sslConf.get("ssl.server.keystore.type", "jks"))
1642            .trustStore(sslConf.get("ssl.server.truststore.location"),
1643                getPassword(sslConf, DFS_SERVER_HTTPS_TRUSTSTORE_PASSWORD_KEY),
1644                sslConf.get("ssl.server.truststore.type", "jks"));
1645      }
1646    
1647      /**
1648       * Load HTTPS-related configuration.
1649       */
1650      public static Configuration loadSslConfiguration(Configuration conf) {
1651        Configuration sslConf = new Configuration(false);
1652    
1653        sslConf.addResource(conf.get(
1654            DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY,
1655            DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_DEFAULT));
1656    
1657        boolean requireClientAuth = conf.getBoolean(DFS_CLIENT_HTTPS_NEED_AUTH_KEY,
1658            DFS_CLIENT_HTTPS_NEED_AUTH_DEFAULT);
1659        sslConf.setBoolean(DFS_CLIENT_HTTPS_NEED_AUTH_KEY, requireClientAuth);
1660        return sslConf;
1661      }
1662    
1663      /**
1664       * Return an {@link HttpServer2.Builder} that the journalnode / namenode /
1665       * secondary namenode can use to initialize their HTTP / HTTPS server.
1666       *
1667       */
1668      public static HttpServer2.Builder httpServerTemplateForNNAndJN(
1669          Configuration conf, final InetSocketAddress httpAddr,
1670          final InetSocketAddress httpsAddr, String name, String spnegoUserNameKey,
1671          String spnegoKeytabFileKey) throws IOException {
1672        HttpConfig.Policy policy = getHttpPolicy(conf);
1673    
1674        HttpServer2.Builder builder = new HttpServer2.Builder().setName(name)
1675            .setConf(conf).setACL(new AccessControlList(conf.get(DFS_ADMIN, " ")))
1676            .setSecurityEnabled(UserGroupInformation.isSecurityEnabled())
1677            .setUsernameConfKey(spnegoUserNameKey)
1678            .setKeytabConfKey(getSpnegoKeytabKey(conf, spnegoKeytabFileKey));
1679    
1680        // Log the principal the web server will run as when security is enabled.
1681        if (UserGroupInformation.isSecurityEnabled()) {
1682          LOG.info("Starting web server as: "
1683              + SecurityUtil.getServerPrincipal(conf.get(spnegoUserNameKey),
1684                  httpAddr.getHostName()));
1685        }
1686    
1687        if (policy.isHttpEnabled()) {
1688          if (httpAddr.getPort() == 0) {
1689            builder.setFindPort(true);
1690          }
1691    
1692          URI uri = URI.create("http://" + NetUtils.getHostPortString(httpAddr));
1693          builder.addEndpoint(uri);
1694          LOG.info("Starting Web-server for " + name + " at: " + uri);
1695        }
1696    
1697        if (policy.isHttpsEnabled() && httpsAddr != null) {
1698          Configuration sslConf = loadSslConfiguration(conf);
1699          loadSslConfToHttpServerBuilder(builder, sslConf);
1700    
1701          if (httpsAddr.getPort() == 0) {
1702            builder.setFindPort(true);
1703          }
1704    
1705          URI uri = URI.create("https://" + NetUtils.getHostPortString(httpsAddr));
1706          builder.addEndpoint(uri);
1707          LOG.info("Starting Web-server for " + name + " at: " + uri);
1708        }
1709        return builder;
1710      }
1711    
1712      /**
1713       * Leverages the Configuration.getPassword method to attempt to get
1714       * passwords from the CredentialProvider API before falling back to
1715       * clear text in the configuration, if falling back is allowed.
1716       * @param conf Configuration instance
1717       * @param alias name of the credential to retrieve
1718       * @return String credential value or null
1719       */
1720      static String getPassword(Configuration conf, String alias) {
1721        String password = null;
1722        try {
1723          char[] passchars = conf.getPassword(alias);
1724          if (passchars != null) {
1725            password = new String(passchars);
1726          }
1727        }
1728        catch (IOException ioe) {
1729          password = null;
1730        }
1731        return password;
1732      }
1733    
1734      /**
1735       * Converts a Date into an ISO-8601 formatted datetime string.
1736       */
1737      public static String dateToIso8601String(Date date) {
1738        SimpleDateFormat df =
1739            new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ssZ", Locale.ENGLISH);
1740        return df.format(date);
1741      }
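          // Example (illustrative; the zone suffix depends on the JVM's default time
          // zone):
          //   dateToIso8601String(new Date(0L))  // -> "1970-01-01T00:00:00+0000" in UTC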
1742    
1743      /**
1744       * Converts a time duration in milliseconds into DDD:HH:MM:SS.MMM format.
1745       */
1746      public static String durationToString(long durationMs) {
1747        boolean negative = false;
1748        if (durationMs < 0) {
1749          negative = true;
1750          durationMs = -durationMs;
1751        }
1752        // Chop off the milliseconds
1753        long durationSec = durationMs / 1000;
1754        final int secondsPerMinute = 60;
1755        final int secondsPerHour = 60*60;
1756        final int secondsPerDay = 60*60*24;
1757        final long days = durationSec / secondsPerDay;
1758        durationSec -= days * secondsPerDay;
1759        final long hours = durationSec / secondsPerHour;
1760        durationSec -= hours * secondsPerHour;
1761        final long minutes = durationSec / secondsPerMinute;
1762        durationSec -= minutes * secondsPerMinute;
1763        final long seconds = durationSec;
1764        final long milliseconds = durationMs % 1000;
1765        String format = "%03d:%02d:%02d:%02d.%03d";
1766        if (negative) {
1767          format = "-" + format;
1768        }
1769        return String.format(format, days, hours, minutes, seconds, milliseconds);
1770      }
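          // Example (illustrative):
          //   durationToString(3661000L)  // -> "000:01:01:01.000"
          //   durationToString(-500L)     // -> "-000:00:00:00.500"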
1771    
1772      /**
1773       * Converts a relative time string into a duration in milliseconds.
1774       */
1775      public static long parseRelativeTime(String relTime) throws IOException {
1776        if (relTime.length() < 2) {
1777          throw new IOException("Unable to parse relative time value of " + relTime
1778              + ": too short");
1779        }
1780        String ttlString = relTime.substring(0, relTime.length()-1);
1781        long ttl;
1782        try {
1783          ttl = Long.parseLong(ttlString);
1784        } catch (NumberFormatException e) {
1785          throw new IOException("Unable to parse relative time value of " + relTime
1786              + ": " + ttlString + " is not a number");
1787        }
1788        if (relTime.endsWith("s")) {
1789          // pass
1790        } else if (relTime.endsWith("m")) {
1791          ttl *= 60;
1792        } else if (relTime.endsWith("h")) {
1793          ttl *= 60*60;
1794        } else if (relTime.endsWith("d")) {
1795          ttl *= 60*60*24;
1796        } else {
1797          throw new IOException("Unable to parse relative time value of " + relTime
1798              + ": unknown time unit " + relTime.charAt(relTime.length() - 1));
1799        }
1800        return ttl*1000;
1801      }
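          // Example (illustrative):
          //   parseRelativeTime("30s")  // -> 30000L
          //   parseRelativeTime("2d")   // -> 172800000L
          //   parseRelativeTime("10x")  // -> IOException: unknown time unit x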
1802    
1803      /**
1804       * Assert that all objects in the collection are equal. Returns silently if
1805       * so, throws an AssertionError if any object is not equal. All null values
1806       * are considered equal.
1807       * 
1808       * @param objects the collection of objects to check for equality.
1809       */
1810      public static void assertAllResultsEqual(Collection<?> objects)
1811          throws AssertionError {
1812        if (objects.size() == 0 || objects.size() == 1)
1813          return;
1814        
1815        Object[] resultsArray = objects.toArray();
1816        for (int i = 1; i < resultsArray.length; i++) {
1817          Object currElement = resultsArray[i];
1818          Object lastElement = resultsArray[i - 1];
1819          if ((currElement == null && currElement != lastElement) ||
1820              (currElement != null && !currElement.equals(lastElement))) {
1821            throw new AssertionError("Not all elements match in results: " +
1822              Arrays.toString(resultsArray));
1823          }
1824        }
1825      }
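          // Example (illustrative):
          //   assertAllResultsEqual(Arrays.asList("a", "a", "a"));  // returns silently
          //   assertAllResultsEqual(Arrays.asList("a", "b"));       // throws AssertionError
          //   assertAllResultsEqual(Arrays.asList(null, null));     // nulls compare equal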
1826    
1827      /**
1828       * Creates a new KeyProvider from the given Configuration.
1829       *
1830       * @param conf Configuration
1831       * @return new KeyProvider, or null if no provider was found.
1832       * @throws IOException if the KeyProvider is improperly specified in
1833       *                             the Configuration
1834       */
1835      public static KeyProvider createKeyProvider(
1836          final Configuration conf) throws IOException {
1837        final String providerUriStr =
1838            conf.get(DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI, null);
1839        // No provider set in conf
1840        if (providerUriStr == null) {
1841          return null;
1842        }
1843        final URI providerUri;
1844        try {
1845          providerUri = new URI(providerUriStr);
1846        } catch (URISyntaxException e) {
1847          throw new IOException(e);
1848        }
1849        KeyProvider keyProvider = KeyProviderFactory.get(providerUri, conf);
1850        if (keyProvider == null) {
1851          throw new IOException("Could not instantiate KeyProvider from " + 
1852              DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI + " setting of '" + 
1853              providerUriStr +"'");
1854        }
1855        if (keyProvider.isTransient()) {
1856          throw new IOException("KeyProvider " + keyProvider.toString()
1857              + " was found but it is a transient provider.");
1858        }
1859        return keyProvider;
1860      }
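          // Example (illustrative; the KMS address is hypothetical):
          //   conf.set(DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI,
          //       "kms://http@kms.example.com:16000/kms");
          //   KeyProvider provider = createKeyProvider(conf);
          //   // resolved through KeyProviderFactory; null is returned only when the
          //   // provider URI key is unset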
1861    
1862      /**
1863       * Creates a new KeyProviderCryptoExtension by wrapping the
1864       * KeyProvider specified in the given Configuration.
1865       *
1866       * @param conf Configuration
1867       * @return new KeyProviderCryptoExtension, or null if no provider was found.
1868       * @throws IOException if the KeyProvider is improperly specified in
1869       *                             the Configuration
1870       */
1871      public static KeyProviderCryptoExtension createKeyProviderCryptoExtension(
1872          final Configuration conf) throws IOException {
1873        KeyProvider keyProvider = createKeyProvider(conf);
1874        if (keyProvider == null) {
1875          return null;
1876        }
1877        KeyProviderCryptoExtension cryptoProvider = KeyProviderCryptoExtension
1878            .createKeyProviderCryptoExtension(keyProvider);
1879        return cryptoProvider;
1880      }
1881    }