/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hdfs;

import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ADMIN;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_HTTPS_NEED_AUTH_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_HTTPS_NEED_AUTH_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_NAMENODE_ID_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_BACKUP_ADDRESS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMESERVICES;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMESERVICE_ID;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SERVER_HTTPS_KEYPASSWORD_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_PASSWORD_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SERVER_HTTPS_TRUSTSTORE_PASSWORD_KEY;

import java.io.IOException;
import java.io.PrintStream;
import java.io.UnsupportedEncodingException;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.net.URI;
import java.net.URISyntaxException;
import java.security.SecureRandom;
import java.text.SimpleDateFormat;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.Comparator;
import java.util.Date;
import java.util.HashSet;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Random;
import java.util.Set;

import javax.net.SocketFactory;

import com.google.common.collect.Sets;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.CommandLineParser;
import org.apache.commons.cli.Option;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.ParseException;
import org.apache.commons.cli.PosixParser;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.crypto.key.KeyProvider;
import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
import org.apache.hadoop.crypto.key.KeyProviderFactory;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.protocolPB.ClientDatanodeProtocolTranslatorPB;
import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.web.SWebHdfsFileSystem;
import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
import org.apache.hadoop.http.HttpConfig;
import org.apache.hadoop.http.HttpServer2;
import org.apache.hadoop.ipc.ProtobufRpcEngine;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.net.NodeBase;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authorize.AccessControlList;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.ToolRunner;

import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Charsets;
import com.google.common.base.Joiner;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.primitives.SignedBytes;
import com.google.protobuf.BlockingService;

@InterfaceAudience.Private
public class DFSUtil {
  public static final Log LOG = LogFactory.getLog(DFSUtil.class.getName());

  public static final byte[] EMPTY_BYTES = {};

  /** Compare two byte arrays by lexicographical order. */
  public static int compareBytes(byte[] left, byte[] right) {
    if (left == null) {
      left = EMPTY_BYTES;
    }
    if (right == null) {
      right = EMPTY_BYTES;
    }
    return SignedBytes.lexicographicalComparator().compare(left, right);
  }

  private DFSUtil() { /* Hidden constructor */ }

  private static final ThreadLocal<Random> RANDOM = new ThreadLocal<Random>() {
    @Override
    protected Random initialValue() {
      return new Random();
    }
  };

  private static final ThreadLocal<SecureRandom> SECURE_RANDOM =
      new ThreadLocal<SecureRandom>() {
    @Override
    protected SecureRandom initialValue() {
      return new SecureRandom();
    }
  };

  /** @return a pseudo random number generator. */
  public static Random getRandom() {
    return RANDOM.get();
  }

  /** @return a pseudo secure random number generator. */
  public static SecureRandom getSecureRandom() {
    return SECURE_RANDOM.get();
  }
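  // The thread-local wrappers above give each thread its own
  // Random/SecureRandom instance, so a call such as
  // DFSUtil.getRandom().nextInt(n) is safe from any thread without
  // synchronization on a shared generator.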
  /** Shuffle the elements in the given array. */
  public static <T> T[] shuffle(final T[] array) {
    if (array != null && array.length > 0) {
      final Random random = getRandom();
      for (int n = array.length; n > 1; ) {
        final int randomIndex = random.nextInt(n);
        n--;
        if (n != randomIndex) {
          final T tmp = array[randomIndex];
          array[randomIndex] = array[n];
          array[n] = tmp;
        }
      }
    }
    return array;
  }

  /**
   * Comparator for sorting DatanodeInfo[] based on decommissioned states.
   * Decommissioned nodes are moved to the end of the array on sorting with
   * this comparator.
   */
  public static final Comparator<DatanodeInfo> DECOM_COMPARATOR =
      new Comparator<DatanodeInfo>() {
        @Override
        public int compare(DatanodeInfo a, DatanodeInfo b) {
          return a.isDecommissioned() == b.isDecommissioned() ? 0 :
              a.isDecommissioned() ? 1 : -1;
        }
      };

  /**
   * Comparator for sorting DatanodeInfo[] based on decommissioned/stale states.
   * Decommissioned/stale nodes are moved to the end of the array on sorting
   * with this comparator.
   */
  @InterfaceAudience.Private
  public static class DecomStaleComparator implements Comparator<DatanodeInfo> {
    private final long staleInterval;

    /**
     * Constructor of DecomStaleComparator
     *
     * @param interval
     *          The time interval for marking datanodes as stale is passed from
     *          outside, since the interval may be changed dynamically.
     */
    public DecomStaleComparator(long interval) {
      this.staleInterval = interval;
    }

    @Override
    public int compare(DatanodeInfo a, DatanodeInfo b) {
      // Decommissioned nodes will still be moved to the end of the list
      if (a.isDecommissioned()) {
        return b.isDecommissioned() ? 0 : 1;
      } else if (b.isDecommissioned()) {
        return -1;
      }
      // Stale nodes will be moved behind the normal nodes
      boolean aStale = a.isStale(staleInterval);
      boolean bStale = b.isStale(staleInterval);
      return aStale == bStale ? 0 : (aStale ? 1 : -1);
    }
  }

  /**
   * Address matcher for matching an address to a local address.
   */
  static final AddressMatcher LOCAL_ADDRESS_MATCHER = new AddressMatcher() {
    @Override
    public boolean match(InetSocketAddress s) {
      return NetUtils.isLocalAddress(s.getAddress());
    }
  };

  /**
   * Whether the pathname is valid.  Currently prohibits relative paths,
   * names which contain a ":" or "//", or other non-canonical paths.
   */
  public static boolean isValidName(String src) {
    // Path must be absolute.
    if (!src.startsWith(Path.SEPARATOR)) {
      return false;
    }

    // Check for ".." "." ":" "/"
    String[] components = StringUtils.split(src, '/');
    for (int i = 0; i < components.length; i++) {
      String element = components[i];
      if (element.equals(".") ||
          (element.indexOf(":") >= 0) ||
          (element.indexOf("/") >= 0)) {
        return false;
      }
      // ".." is allowed in path starting with /.reserved/.inodes
      if (element.equals("..")) {
        if (components.length > 4
            && components[1].equals(FSDirectory.DOT_RESERVED_STRING)
            && components[2].equals(FSDirectory.DOT_INODES_STRING)) {
          continue;
        }
        return false;
      }
      // The string may start or end with a /, but not have
      // "//" in the middle.
      if (element.isEmpty() && i != components.length - 1 &&
          i != 0) {
        return false;
      }
    }
    return true;
  }
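  // Illustrative behavior of isValidName (examples, not exhaustive):
  //
  //   isValidName("/user/hadoop/file")   // true: absolute and canonical
  //   isValidName("user/hadoop/file")    // false: relative path
  //   isValidName("/user/a:b")           // false: component contains ':'
  //   isValidName("/user/../hadoop")     // false: ".." outside /.reserved/.inodes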
  /**
   * Checks if a string is a valid path component.  For instance, components
   * cannot contain a ":" or "/", and cannot be equal to a reserved component
   * like ".snapshot".
   * <p>
   * The primary use of this method is for validating paths when loading the
   * FSImage.  During normal NN operation, paths are sometimes allowed to
   * contain reserved components.
   *
   * @return true if the component is valid
   */
  public static boolean isValidNameForComponent(String component) {
    if (component.equals(".") ||
        component.equals("..") ||
        component.indexOf(":") >= 0 ||
        component.indexOf("/") >= 0) {
      return false;
    }
    return !isReservedPathComponent(component);
  }

  /**
   * Returns whether the component is reserved.
   *
   * <p>
   * Note that some components are only reserved under certain directories, e.g.
   * "/.reserved" is reserved, while "/hadoop/.reserved" is not.
   * @return true, if the component is reserved
   */
  public static boolean isReservedPathComponent(String component) {
    for (String reserved : HdfsConstants.RESERVED_PATH_COMPONENTS) {
      if (component.equals(reserved)) {
        return true;
      }
    }
    return false;
  }

  /**
   * Converts a byte array to a string using UTF-8 encoding.
   */
  public static String bytes2String(byte[] bytes) {
    return bytes2String(bytes, 0, bytes.length);
  }

  /**
   * Decode a specific range of bytes of the given byte array to a string
   * using UTF-8.
   *
   * @param bytes The bytes to be decoded into characters
   * @param offset The index of the first byte to decode
   * @param length The number of bytes to decode
   * @return The decoded string
   */
  public static String bytes2String(byte[] bytes, int offset, int length) {
    try {
      return new String(bytes, offset, length, "UTF8");
    } catch (UnsupportedEncodingException e) {
      assert false : "UTF8 encoding is not supported ";
    }
    return null;
  }

  /**
   * Converts a string to a byte array using UTF-8 encoding.
   */
  public static byte[] string2Bytes(String str) {
    return str.getBytes(Charsets.UTF_8);
  }

  /**
   * Given a list of path components, returns a path as a UTF-8 string.
   */
  public static String byteArray2PathString(byte[][] pathComponents,
      int offset, int length) {
    if (pathComponents.length == 0) {
      return "";
    }
    Preconditions.checkArgument(offset >= 0 && offset < pathComponents.length);
    Preconditions.checkArgument(length >= 0 && offset + length <=
        pathComponents.length);
    if (pathComponents.length == 1
        && (pathComponents[0] == null || pathComponents[0].length == 0)) {
      return Path.SEPARATOR;
    }
    StringBuilder result = new StringBuilder();
    for (int i = offset; i < offset + length; i++) {
      result.append(new String(pathComponents[i], Charsets.UTF_8));
      if (i < pathComponents.length - 1) {
        result.append(Path.SEPARATOR_CHAR);
      }
    }
    return result.toString();
  }

  public static String byteArray2PathString(byte[][] pathComponents) {
    return byteArray2PathString(pathComponents, 0, pathComponents.length);
  }
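  // Illustrative round trip (component arrays use an empty first component
  // for the leading '/'):
  //
  //   byte[][] components = { DFSUtil.string2Bytes(""),
  //                           DFSUtil.string2Bytes("foo"),
  //                           DFSUtil.string2Bytes("bar") };
  //   DFSUtil.byteArray2PathString(components);   // "/foo/bar"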
  /**
   * Converts a list of path components into a path using Path.SEPARATOR.
   *
   * @param components Path components
   * @return Combined path as a UTF-8 string
   */
  public static String strings2PathString(String[] components) {
    if (components.length == 0) {
      return "";
    }
    if (components.length == 1) {
      if (components[0] == null || components[0].isEmpty()) {
        return Path.SEPARATOR;
      }
    }
    return Joiner.on(Path.SEPARATOR).join(components);
  }

  /**
   * Given a list of path components, returns the path as a byte array.
   */
  public static byte[] byteArray2bytes(byte[][] pathComponents) {
    if (pathComponents.length == 0) {
      return EMPTY_BYTES;
    } else if (pathComponents.length == 1
        && (pathComponents[0] == null || pathComponents[0].length == 0)) {
      return new byte[]{(byte) Path.SEPARATOR_CHAR};
    }
    int length = 0;
    for (int i = 0; i < pathComponents.length; i++) {
      length += pathComponents[i].length;
      if (i < pathComponents.length - 1) {
        length++; // for SEPARATOR
      }
    }
    byte[] path = new byte[length];
    int index = 0;
    for (int i = 0; i < pathComponents.length; i++) {
      System.arraycopy(pathComponents[i], 0, path, index,
          pathComponents[i].length);
      index += pathComponents[i].length;
      if (i < pathComponents.length - 1) {
        path[index] = (byte) Path.SEPARATOR_CHAR;
        index++;
      }
    }
    return path;
  }

  /** Convert an object representing a path to a string. */
  public static String path2String(final Object path) {
    return path == null ? null
        : path instanceof String ? (String) path
        : path instanceof byte[][] ? byteArray2PathString((byte[][]) path)
        : path.toString();
  }

  /**
   * Splits the array of bytes into an array of byte arrays
   * on the given separator byte.
   * @param bytes the array of bytes to split
   * @param separator the delimiting byte
   */
  public static byte[][] bytes2byteArray(byte[] bytes, byte separator) {
    return bytes2byteArray(bytes, bytes.length, separator);
  }
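  // Illustrative split: the byte form of "/foo/bar" on '/' yields a leading
  // empty component for the root slash.
  //
  //   byte[][] parts = DFSUtil.bytes2byteArray(
  //       DFSUtil.string2Bytes("/foo/bar"), (byte) '/');
  //   // parts.length == 3; parts[0] is empty, parts[1] holds "foo" bytes,
  //   // parts[2] holds "bar" bytes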
  /**
   * Splits the first len bytes of the given array into an array of byte
   * arrays on the given separator byte.
   * @param bytes the byte array to split
   * @param len the number of bytes to split
   * @param separator the delimiting byte
   */
  public static byte[][] bytes2byteArray(byte[] bytes,
                                         int len,
                                         byte separator) {
    assert len <= bytes.length;
    int splits = 0;
    if (len == 0) {
      return new byte[][]{null};
    }
    // Count the splits.  Omit multiple separators and the last one
    for (int i = 0; i < len; i++) {
      if (bytes[i] == separator) {
        splits++;
      }
    }
    int last = len - 1;
    while (last > -1 && bytes[last--] == separator) {
      splits--;
    }
    if (splits == 0 && bytes[0] == separator) {
      return new byte[][]{null};
    }
    splits++;
    byte[][] result = new byte[splits][];
    int startIndex = 0;
    int nextIndex = 0;
    int index = 0;
    // Build the splits
    while (index < splits) {
      while (nextIndex < len && bytes[nextIndex] != separator) {
        nextIndex++;
      }
      result[index] = new byte[nextIndex - startIndex];
      System.arraycopy(bytes, startIndex, result[index], 0, nextIndex
          - startIndex);
      index++;
      startIndex = nextIndex + 1;
      nextIndex = startIndex;
    }
    return result;
  }

  /**
   * Convert a LocatedBlocks to BlockLocation[]
   * @param blocks a LocatedBlocks
   * @return an array of BlockLocations
   */
  public static BlockLocation[] locatedBlocks2Locations(LocatedBlocks blocks) {
    if (blocks == null) {
      return new BlockLocation[0];
    }
    return locatedBlocks2Locations(blocks.getLocatedBlocks());
  }

  /**
   * Convert a List<LocatedBlock> to BlockLocation[]
   * @param blocks A List<LocatedBlock> to be converted
   * @return converted array of BlockLocation
   */
  public static BlockLocation[] locatedBlocks2Locations(
      List<LocatedBlock> blocks) {
    if (blocks == null) {
      return new BlockLocation[0];
    }
    int nrBlocks = blocks.size();
    BlockLocation[] blkLocations = new BlockLocation[nrBlocks];
    if (nrBlocks == 0) {
      return blkLocations;
    }
    int idx = 0;
    for (LocatedBlock blk : blocks) {
      assert idx < nrBlocks : "Incorrect index";
      DatanodeInfo[] locations = blk.getLocations();
      String[] hosts = new String[locations.length];
      String[] xferAddrs = new String[locations.length];
      String[] racks = new String[locations.length];
      for (int hCnt = 0; hCnt < locations.length; hCnt++) {
        hosts[hCnt] = locations[hCnt].getHostName();
        xferAddrs[hCnt] = locations[hCnt].getXferAddr();
        NodeBase node = new NodeBase(xferAddrs[hCnt],
            locations[hCnt].getNetworkLocation());
        racks[hCnt] = node.toString();
      }
      DatanodeInfo[] cachedLocations = blk.getCachedLocations();
      String[] cachedHosts = new String[cachedLocations.length];
      for (int i = 0; i < cachedLocations.length; i++) {
        cachedHosts[i] = cachedLocations[i].getHostName();
      }
      blkLocations[idx] = new BlockLocation(xferAddrs, hosts, cachedHosts,
          racks,
          blk.getStartOffset(),
          blk.getBlockSize(),
          blk.isCorrupt());
      idx++;
    }
    return blkLocations;
  }

  /**
   * Returns collection of nameservice Ids from the configuration.
   * @param conf configuration
   * @return collection of nameservice Ids, or null if not specified
   */
  public static Collection<String> getNameServiceIds(Configuration conf) {
    return conf.getTrimmedStringCollection(DFS_NAMESERVICES);
  }

  /**
   * @return <code>coll</code> if it is non-null and non-empty.  Otherwise,
   *         returns a list with a single null value.
   */
  private static Collection<String> emptyAsSingletonNull(
      Collection<String> coll) {
    if (coll == null || coll.isEmpty()) {
      return Collections.singletonList(null);
    } else {
      return coll;
    }
  }
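  // The singleton-null convention above lets the address-resolution loops
  // below run exactly once with a null nameservice/namenode ID, which is
  // how the non-federated / non-HA configuration case is represented.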
  /**
   * Namenode HighAvailability related configuration.
   * Returns collection of namenode Ids from the configuration.  One logical
   * id for each namenode in the HA setup.
   *
   * @param conf configuration
   * @param nsId the nameservice ID to look at, or null for non-federated
   * @return collection of namenode Ids
   */
  public static Collection<String> getNameNodeIds(Configuration conf,
      String nsId) {
    String key = addSuffix(DFS_HA_NAMENODES_KEY_PREFIX, nsId);
    return conf.getTrimmedStringCollection(key);
  }

  /**
   * Given a list of keys in the order of preference, returns a value
   * for the key in the given order from the configuration.
   * @param defaultValue default value to return, when key was not found
   * @param keySuffix suffix to add to the key, if it is not null
   * @param conf Configuration
   * @param keys list of keys in the order of preference
   * @return value of the key or default if a key was not found in configuration
   */
  private static String getConfValue(String defaultValue, String keySuffix,
      Configuration conf, String... keys) {
    String value = null;
    for (String key : keys) {
      key = addSuffix(key, keySuffix);
      value = conf.get(key);
      if (value != null) {
        break;
      }
    }
    if (value == null) {
      value = defaultValue;
    }
    return value;
  }

  /** Add a non-empty and non-null suffix to a key. */
  private static String addSuffix(String key, String suffix) {
    if (suffix == null || suffix.isEmpty()) {
      return key;
    }
    assert !suffix.startsWith(".") :
        "suffix '" + suffix + "' should not already have '.' prepended.";
    return key + "." + suffix;
  }

  /** Concatenate a list of suffix strings, '.' separated. */
  private static String concatSuffixes(String... suffixes) {
    if (suffixes == null) {
      return null;
    }
    return Joiner.on(".").skipNulls().join(suffixes);
  }

  /**
   * Return configuration key of format key.suffix1.suffix2...suffixN
   */
  public static String addKeySuffixes(String key, String... suffixes) {
    String keySuffix = concatSuffixes(suffixes);
    return addSuffix(key, keySuffix);
  }
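  // Example: building the federated, HA-suffixed form of a key ("ns1" and
  // "nn1" are hypothetical nameservice/namenode IDs):
  //
  //   DFSUtil.addKeySuffixes("dfs.namenode.rpc-address", "ns1", "nn1")
  //   // => "dfs.namenode.rpc-address.ns1.nn1"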
  /**
   * Returns the configured address for all NameNodes in the cluster.
   * @param conf configuration
   * @param defaultAddress default address to return in case key is not found.
   * @param keys Set of keys to look for in the order of preference
   * @return a map(nameserviceId to map(namenodeId to InetSocketAddress))
   */
  private static Map<String, Map<String, InetSocketAddress>>
      getAddresses(Configuration conf, String defaultAddress, String... keys) {
    Collection<String> nameserviceIds = getNameServiceIds(conf);
    return getAddressesForNsIds(conf, nameserviceIds, defaultAddress, keys);
  }

  /**
   * Returns the configured address for all NameNodes in the cluster.
   * @param conf configuration
   * @param nsIds nameservice IDs to consider
   * @param defaultAddress default address to return in case key is not found.
   * @param keys Set of keys to look for in the order of preference
   * @return a map(nameserviceId to map(namenodeId to InetSocketAddress))
   */
  private static Map<String, Map<String, InetSocketAddress>>
      getAddressesForNsIds(Configuration conf, Collection<String> nsIds,
          String defaultAddress, String... keys) {
    // Look for configurations of the form <key>[.<nameserviceId>][.<namenodeId>]
    // across all of the configured nameservices and namenodes.
    Map<String, Map<String, InetSocketAddress>> ret = Maps.newLinkedHashMap();
    for (String nsId : emptyAsSingletonNull(nsIds)) {
      Map<String, InetSocketAddress> isas =
          getAddressesForNameserviceId(conf, nsId, defaultAddress, keys);
      if (!isas.isEmpty()) {
        ret.put(nsId, isas);
      }
    }
    return ret;
  }

  /**
   * Get all of the RPC addresses of the individual NNs in a given nameservice.
   *
   * @param conf Configuration
   * @param nsId the nameservice whose NNs addresses we want.
   * @param defaultValue default address to return in case key is not found.
   * @return A map from nnId -> RPC address of each NN in the nameservice.
   */
  public static Map<String, InetSocketAddress> getRpcAddressesForNameserviceId(
      Configuration conf, String nsId, String defaultValue) {
    return getAddressesForNameserviceId(conf, nsId, defaultValue,
        DFS_NAMENODE_RPC_ADDRESS_KEY);
  }

  private static Map<String, InetSocketAddress> getAddressesForNameserviceId(
      Configuration conf, String nsId, String defaultValue,
      String... keys) {
    Collection<String> nnIds = getNameNodeIds(conf, nsId);
    Map<String, InetSocketAddress> ret = Maps.newHashMap();
    for (String nnId : emptyAsSingletonNull(nnIds)) {
      String suffix = concatSuffixes(nsId, nnId);
      String address = getConfValue(defaultValue, suffix, conf, keys);
      if (address != null) {
        InetSocketAddress isa = NetUtils.createSocketAddr(address);
        if (isa.isUnresolved()) {
          LOG.warn("Namenode for " + nsId +
              " remains unresolved for ID " + nnId +
              ". Check your hdfs-site.xml file to " +
              "ensure namenodes are configured properly.");
        }
        ret.put(nnId, isa);
      }
    }
    return ret;
  }
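  // Sketch of the configuration shape these lookups resolve (nameservice
  // and namenode IDs are hypothetical):
  //
  //   dfs.nameservices                  = ns1
  //   dfs.ha.namenodes.ns1              = nn1,nn2
  //   dfs.namenode.rpc-address.ns1.nn1  = host1:8020
  //   dfs.namenode.rpc-address.ns1.nn2  = host2:8020
  //
  // getRpcAddressesForNameserviceId(conf, "ns1", null) then returns
  // { nn1 -> host1:8020, nn2 -> host2:8020 }.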
  /**
   * @return a collection of all configured NN Kerberos principals.
   */
  public static Set<String> getAllNnPrincipals(Configuration conf)
      throws IOException {
    Set<String> principals = new HashSet<String>();
    for (String nsId : DFSUtil.getNameServiceIds(conf)) {
      if (HAUtil.isHAEnabled(conf, nsId)) {
        for (String nnId : DFSUtil.getNameNodeIds(conf, nsId)) {
          Configuration confForNn = new Configuration(conf);
          NameNode.initializeGenericKeys(confForNn, nsId, nnId);
          String principal = SecurityUtil.getServerPrincipal(confForNn
              .get(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY),
              NameNode.getAddress(confForNn).getHostName());
          principals.add(principal);
        }
      } else {
        Configuration confForNn = new Configuration(conf);
        NameNode.initializeGenericKeys(confForNn, nsId, null);
        String principal = SecurityUtil.getServerPrincipal(confForNn
            .get(DFSConfigKeys.DFS_NAMENODE_KERBEROS_PRINCIPAL_KEY),
            NameNode.getAddress(confForNn).getHostName());
        principals.add(principal);
      }
    }

    return principals;
  }

  /**
   * Returns list of InetSocketAddress corresponding to HA NN RPC addresses
   * from the configuration.
   *
   * @param conf configuration
   * @return list of InetSocketAddresses
   */
  public static Map<String, Map<String, InetSocketAddress>> getHaNnRpcAddresses(
      Configuration conf) {
    return getAddresses(conf, null, DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY);
  }

  /**
   * Returns list of InetSocketAddress corresponding to HA NN HTTP addresses
   * from the configuration.
   *
   * @param conf configuration
   * @param scheme the filesystem scheme (webhdfs / swebhdfs)
   * @return list of InetSocketAddresses
   */
  public static Map<String, Map<String, InetSocketAddress>> getHaNnWebHdfsAddresses(
      Configuration conf, String scheme) {
    if (WebHdfsFileSystem.SCHEME.equals(scheme)) {
      return getAddresses(conf, null,
          DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);
    } else if (SWebHdfsFileSystem.SCHEME.equals(scheme)) {
      return getAddresses(conf, null,
          DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY);
    } else {
      throw new IllegalArgumentException("Unsupported scheme: " + scheme);
    }
  }

  /**
   * Returns list of InetSocketAddress corresponding to backup node rpc
   * addresses from the configuration.
   *
   * @param conf configuration
   * @return list of InetSocketAddresses
   * @throws IOException on error
   */
  public static Map<String, Map<String, InetSocketAddress>> getBackupNodeAddresses(
      Configuration conf) throws IOException {
    Map<String, Map<String, InetSocketAddress>> addressList = getAddresses(conf,
        null, DFS_NAMENODE_BACKUP_ADDRESS_KEY);
    if (addressList.isEmpty()) {
      throw new IOException("Incorrect configuration: backup node address "
          + DFS_NAMENODE_BACKUP_ADDRESS_KEY + " is not configured.");
    }
    return addressList;
  }

  /**
   * Returns list of InetSocketAddresses corresponding to secondary namenode
   * http addresses from the configuration.
   *
   * @param conf configuration
   * @return list of InetSocketAddresses
   * @throws IOException on error
   */
  public static Map<String, Map<String, InetSocketAddress>> getSecondaryNameNodeAddresses(
      Configuration conf) throws IOException {
    Map<String, Map<String, InetSocketAddress>> addressList = getAddresses(conf, null,
        DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY);
    if (addressList.isEmpty()) {
      throw new IOException("Incorrect configuration: secondary namenode address "
          + DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY + " is not configured.");
    }
    return addressList;
  }

  /**
   * Returns list of InetSocketAddresses corresponding to namenodes from the
   * configuration.
   *
   * Returns the namenode address specifically configured for datanodes (using
   * service ports), if found.  If not, the regular RPC address configured for
   * other clients is returned.
   *
   * @param conf configuration
   * @return list of InetSocketAddress
   * @throws IOException on error
   */
  public static Map<String, Map<String, InetSocketAddress>> getNNServiceRpcAddresses(
      Configuration conf) throws IOException {
    // Use default address as fall back
    String defaultAddress;
    try {
      defaultAddress = NetUtils.getHostPortString(NameNode.getAddress(conf));
    } catch (IllegalArgumentException e) {
      defaultAddress = null;
    }

    Map<String, Map<String, InetSocketAddress>> addressList =
        getAddresses(conf, defaultAddress,
            DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, DFS_NAMENODE_RPC_ADDRESS_KEY);
    if (addressList.isEmpty()) {
      throw new IOException("Incorrect configuration: namenode address "
          + DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY + " or "
          + DFS_NAMENODE_RPC_ADDRESS_KEY
          + " is not configured.");
    }
    return addressList;
  }
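  // Hypothetical config illustrating the service-address preference above:
  //
  //   dfs.namenode.servicerpc-address.ns1.nn1 = host1:8021
  //   dfs.namenode.rpc-address.ns1.nn1        = host1:8020
  //
  // Datanodes resolve host1:8021; ordinary clients keep using host1:8020.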
  /**
   * Returns list of InetSocketAddresses corresponding to the namenodes
   * that manage this cluster.  Note this is to be used by datanodes to get
   * the list of namenode addresses to talk to.
   *
   * Returns the namenode address specifically configured for datanodes (using
   * service ports), if found.  If not, the regular RPC address configured for
   * other clients is returned.
   *
   * @param conf configuration
   * @return list of InetSocketAddress
   * @throws IOException on error
   */
  public static Map<String, Map<String, InetSocketAddress>>
      getNNServiceRpcAddressesForCluster(Configuration conf) throws IOException {
    // Use default address as fall back
    String defaultAddress;
    try {
      defaultAddress = NetUtils.getHostPortString(NameNode.getAddress(conf));
    } catch (IllegalArgumentException e) {
      defaultAddress = null;
    }

    Collection<String> parentNameServices = conf.getTrimmedStringCollection
        (DFSConfigKeys.DFS_INTERNAL_NAMESERVICES_KEY);

    if (parentNameServices.isEmpty()) {
      parentNameServices = conf.getTrimmedStringCollection
          (DFSConfigKeys.DFS_NAMESERVICES);
    } else {
      // Ensure that the internal service is indeed in the list of all available
      // nameservices.
      Set<String> availableNameServices = Sets.newHashSet(conf
          .getTrimmedStringCollection(DFSConfigKeys.DFS_NAMESERVICES));
      for (String nsId : parentNameServices) {
        if (!availableNameServices.contains(nsId)) {
          throw new IOException("Unknown nameservice: " + nsId);
        }
      }
    }

    Map<String, Map<String, InetSocketAddress>> addressList =
        getAddressesForNsIds(conf, parentNameServices, defaultAddress,
            DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, DFS_NAMENODE_RPC_ADDRESS_KEY);
    if (addressList.isEmpty()) {
      throw new IOException("Incorrect configuration: namenode address "
          + DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY + " or "
          + DFS_NAMENODE_RPC_ADDRESS_KEY
          + " is not configured.");
    }
    return addressList;
  }

  /**
   * Flatten the given map, as returned by other functions in this class,
   * into a flat list of {@link ConfiguredNNAddress} instances.
   */
  public static List<ConfiguredNNAddress> flattenAddressMap(
      Map<String, Map<String, InetSocketAddress>> map) {
    List<ConfiguredNNAddress> ret = Lists.newArrayList();

    for (Map.Entry<String, Map<String, InetSocketAddress>> entry :
        map.entrySet()) {
      String nsId = entry.getKey();
      Map<String, InetSocketAddress> nnMap = entry.getValue();
      for (Map.Entry<String, InetSocketAddress> e2 : nnMap.entrySet()) {
        String nnId = e2.getKey();
        InetSocketAddress addr = e2.getValue();

        ret.add(new ConfiguredNNAddress(nsId, nnId, addr));
      }
    }
    return ret;
  }
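  // Illustrative: with the ns1 map from the sketch above, flattenAddressMap
  // yields two entries, (ns1, nn1, host1:8020) and (ns1, nn2, host2:8020).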
  /**
   * Format the given map, as returned by other functions in this class,
   * into a string suitable for debugging display.  The format of this string
   * should not be considered an interface, and is liable to change.
   */
  public static String addressMapToString(
      Map<String, Map<String, InetSocketAddress>> map) {
    StringBuilder b = new StringBuilder();
    for (Map.Entry<String, Map<String, InetSocketAddress>> entry :
        map.entrySet()) {
      String nsId = entry.getKey();
      Map<String, InetSocketAddress> nnMap = entry.getValue();
      b.append("Nameservice <").append(nsId).append(">:").append("\n");
      for (Map.Entry<String, InetSocketAddress> e2 : nnMap.entrySet()) {
        b.append("  NN ID ").append(e2.getKey())
            .append(" => ").append(e2.getValue()).append("\n");
      }
    }
    return b.toString();
  }

  public static String nnAddressesAsString(Configuration conf) {
    Map<String, Map<String, InetSocketAddress>> addresses =
        getHaNnRpcAddresses(conf);
    return addressMapToString(addresses);
  }

  /**
   * Represents one of the NameNodes configured in the cluster.
   */
  public static class ConfiguredNNAddress {
    private final String nameserviceId;
    private final String namenodeId;
    private final InetSocketAddress addr;

    private ConfiguredNNAddress(String nameserviceId, String namenodeId,
        InetSocketAddress addr) {
      this.nameserviceId = nameserviceId;
      this.namenodeId = namenodeId;
      this.addr = addr;
    }

    public String getNameserviceId() {
      return nameserviceId;
    }

    public String getNamenodeId() {
      return namenodeId;
    }

    public InetSocketAddress getAddress() {
      return addr;
    }

    @Override
    public String toString() {
      return "ConfiguredNNAddress[nsId=" + nameserviceId + ";" +
          "nnId=" + namenodeId + ";addr=" + addr + "]";
    }
  }

  /** @return Internal name services specified in the conf. */
  static Collection<String> getInternalNameServices(Configuration conf) {
    final Collection<String> ids = conf.getTrimmedStringCollection(
        DFSConfigKeys.DFS_INTERNAL_NAMESERVICES_KEY);
    return !ids.isEmpty() ? ids : getNameServiceIds(conf);
  }

  /**
   * Get a URI for each internal nameservice.  If a nameservice is
   * HA-enabled, then the logical URI of the nameservice is returned.  If the
   * nameservice is not HA-enabled, then a URI corresponding to an RPC address
   * of the single NN for that nameservice is returned, preferring the service
   * RPC address over the client RPC address.
   *
   * @param conf configuration
   * @return a collection of all configured NN URIs, preferring service
   *         addresses
   */
  public static Collection<URI> getInternalNsRpcUris(Configuration conf) {
    return getNameServiceUris(conf, getInternalNameServices(conf),
        DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
        DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY);
  }
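  // Sample shape of addressMapToString output (the format is explicitly not
  // a stable interface):
  //
  //   Nameservice <ns1>:
  //     NN ID nn1 => host1:8020
  //     NN ID nn2 => host2:8020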
  /**
   * Get a URI for each configured nameservice.  If a nameservice is
   * HA-enabled, then the logical URI of the nameservice is returned.  If the
   * nameservice is not HA-enabled, then a URI corresponding to the address of
   * the single NN for that nameservice is returned.
   *
   * @param conf configuration
   * @param nameServices the nameservices to consider
   * @param keys configuration keys to try in order to get the URI for non-HA
   *        nameservices
   * @return a collection of all configured NN URIs
   */
  static Collection<URI> getNameServiceUris(Configuration conf,
      Collection<String> nameServices, String... keys) {
    Set<URI> ret = new HashSet<URI>();

    // We're passed multiple possible configuration keys for any given NN or HA
    // nameservice, and search the config in order of these keys.  In order to
    // make sure that a later config lookup (e.g. fs.defaultFS) doesn't add a
    // URI for a config key for which we've already found a preferred entry, we
    // keep track of non-preferred keys here.
    Set<URI> nonPreferredUris = new HashSet<URI>();

    for (String nsId : nameServices) {
      if (HAUtil.isHAEnabled(conf, nsId)) {
        // Add the logical URI of the nameservice.
        try {
          ret.add(new URI(HdfsConstants.HDFS_URI_SCHEME + "://" + nsId));
        } catch (URISyntaxException ue) {
          throw new IllegalArgumentException(ue);
        }
      } else {
        // Add the URI corresponding to the address of the NN.
        boolean uriFound = false;
        for (String key : keys) {
          String addr = conf.get(concatSuffixes(key, nsId));
          if (addr != null) {
            URI uri = createUri(HdfsConstants.HDFS_URI_SCHEME,
                NetUtils.createSocketAddr(addr));
            if (!uriFound) {
              uriFound = true;
              ret.add(uri);
            } else {
              nonPreferredUris.add(uri);
            }
          }
        }
      }
    }

    // Add the generic configuration keys.
    boolean uriFound = false;
    for (String key : keys) {
      String addr = conf.get(key);
      if (addr != null) {
        URI uri = createUri("hdfs", NetUtils.createSocketAddr(addr));
        if (!uriFound) {
          uriFound = true;
          ret.add(uri);
        } else {
          nonPreferredUris.add(uri);
        }
      }
    }

    // Add the default URI if it is an HDFS URI.
    URI defaultUri = FileSystem.getDefaultUri(conf);
    // Checks if defaultUri is in ip:port format and converts it to
    // hostname:port format.
    if (defaultUri != null && (defaultUri.getPort() != -1)) {
      defaultUri = createUri(defaultUri.getScheme(),
          NetUtils.createSocketAddr(defaultUri.getHost(),
              defaultUri.getPort()));
    }
    if (defaultUri != null &&
        HdfsConstants.HDFS_URI_SCHEME.equals(defaultUri.getScheme()) &&
        !nonPreferredUris.contains(defaultUri)) {
      ret.add(defaultUri);
    }

    return ret;
  }
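  // Illustrative: for the HA nameservice ns1 sketched earlier, the returned
  // collection contains the logical URI hdfs://ns1 rather than the
  // individual NN addresses.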
  /**
   * Given the InetSocketAddress, this method returns the nameservice Id
   * corresponding to the key with matching address, by doing a reverse
   * lookup on the list of nameservices until it finds a match.
   *
   * Since the process of resolving URIs to Addresses is slightly expensive,
   * this utility method should not be used in performance-critical routines.
   *
   * @param conf - configuration
   * @param address - InetSocketAddress for configured communication with NN.
   *     Configured addresses are typically given as URIs, but we may have to
   *     compare against a URI typed in by a human, or the server name may be
   *     aliased, so we compare unambiguous InetSocketAddresses instead of just
   *     comparing URI substrings.
   * @param keys - list of configured communication parameters that should
   *     be checked for matches.  For example, to compare against RPC addresses,
   *     provide the list DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
   *     DFS_NAMENODE_RPC_ADDRESS_KEY.  Use the generic parameter keys,
   *     not the NameServiceId-suffixed keys.
   * @return nameserviceId, or null if no match found
   */
  public static String getNameServiceIdFromAddress(final Configuration conf,
      final InetSocketAddress address, String... keys) {
    // Configuration with a single namenode and no nameserviceId
    String[] ids = getSuffixIDs(conf, address, keys);
    return (ids != null) ? ids[0] : null;
  }

  /**
   * Return the server http or https address from the configuration for a
   * given namenode rpc address.
   * @param namenodeAddr - namenode RPC address
   * @param conf configuration
   * @param scheme - the scheme (http / https)
   * @return server http or https address
   * @throws IOException
   */
  public static URI getInfoServer(InetSocketAddress namenodeAddr,
      Configuration conf, String scheme) throws IOException {
    String[] suffixes = null;
    if (namenodeAddr != null) {
      // if non-default namenode, try reverse look up
      // the nameServiceID if it is available
      suffixes = getSuffixIDs(conf, namenodeAddr,
          DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
          DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY);
    }

    String authority;
    if ("http".equals(scheme)) {
      authority = getSuffixedConf(conf, DFS_NAMENODE_HTTP_ADDRESS_KEY,
          DFS_NAMENODE_HTTP_ADDRESS_DEFAULT, suffixes);
    } else if ("https".equals(scheme)) {
      authority = getSuffixedConf(conf, DFS_NAMENODE_HTTPS_ADDRESS_KEY,
          DFS_NAMENODE_HTTPS_ADDRESS_DEFAULT, suffixes);
    } else {
      throw new IllegalArgumentException("Invalid scheme: " + scheme);
    }

    if (namenodeAddr != null) {
      authority = substituteForWildcardAddress(authority,
          namenodeAddr.getHostName());
    }
    return URI.create(scheme + "://" + authority);
  }

  /**
   * Lookup the HTTP / HTTPS address of the namenode, and replace its hostname
   * with defaultHost when the resolved address is a wildcard / local address.
   *
   * @param defaultHost
   *          The default host name of the namenode.
   * @param conf
   *          The configuration
   * @param scheme
   *          HTTP or HTTPS
   * @throws IOException
   */
  public static URI getInfoServerWithDefaultHost(String defaultHost,
      Configuration conf, final String scheme) throws IOException {
    URI configuredAddr = getInfoServer(null, conf, scheme);
    String authority = substituteForWildcardAddress(
        configuredAddr.getAuthority(), defaultHost);
    return URI.create(scheme + "://" + authority);
  }

  /**
   * Determine whether HTTP or HTTPS should be used to connect to the remote
   * server.  Currently the client only connects to the server via HTTPS if the
   * policy is set to HTTPS_ONLY.
   *
   * @return the scheme (HTTP / HTTPS)
   */
  public static String getHttpClientScheme(Configuration conf) {
    HttpConfig.Policy policy = DFSUtil.getHttpPolicy(conf);
    return policy == HttpConfig.Policy.HTTPS_ONLY ? "https" : "http";
  }
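  // Illustrative usage (nnRpcAddr is a hypothetical NN RPC address): resolve
  // the NN web UI URI for whichever scheme the cluster policy dictates.
  //
  //   String scheme = DFSUtil.getHttpClientScheme(conf);  // "http" or "https"
  //   URI info = DFSUtil.getInfoServer(nnRpcAddr, conf, scheme);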
  /**
   * Substitute a default host in the case that an address has been configured
   * with a wildcard.  This is used, for example, when determining the HTTP
   * address of the NN -- if it's configured to bind to 0.0.0.0, we want to
   * substitute the hostname from the filesystem URI rather than trying to
   * connect to 0.0.0.0.
   * @param configuredAddress the address found in the configuration
   * @param defaultHost the host to substitute with, if configuredAddress
   *        is a local/wildcard address.
   * @return the substituted address
   * @throws IOException if it is a wildcard address and security is enabled
   */
  @VisibleForTesting
  static String substituteForWildcardAddress(String configuredAddress,
      String defaultHost) throws IOException {
    InetSocketAddress sockAddr = NetUtils.createSocketAddr(configuredAddress);
    InetSocketAddress defaultSockAddr = NetUtils.createSocketAddr(defaultHost
        + ":0");
    final InetAddress addr = sockAddr.getAddress();
    if (addr != null && addr.isAnyLocalAddress()) {
      if (UserGroupInformation.isSecurityEnabled() &&
          defaultSockAddr.getAddress().isAnyLocalAddress()) {
        throw new IOException("Cannot use a wildcard address with security. " +
            "Must explicitly set bind address for Kerberos");
      }
      return defaultHost + ":" + sockAddr.getPort();
    } else {
      return configuredAddress;
    }
  }

  private static String getSuffixedConf(Configuration conf,
      String key, String defaultVal, String[] suffixes) {
    String ret = conf.get(DFSUtil.addKeySuffixes(key, suffixes));
    if (ret != null) {
      return ret;
    }
    return conf.get(key, defaultVal);
  }

  /**
   * Sets the node specific setting into generic configuration key.  Looks up
   * value of "key.nameserviceId.namenodeId" and if found sets that value into
   * generic key in the conf.  If this is not found, falls back to
   * "key.nameserviceId" and then the unmodified key.
   *
   * Note that this only modifies the runtime conf.
   *
   * @param conf
   *          Configuration object to lookup specific key and to set the value
   *          to the key passed.  Note the conf object is modified.
   * @param nameserviceId
   *          nameservice Id to construct the node specific key.  Pass null if
   *          federation is not configured.
   * @param nnId
   *          namenode Id to construct the node specific key.  Pass null if
   *          HA is not configured.
   * @param keys
   *          The keys for which the node specific value is looked up
   */
  public static void setGenericConf(Configuration conf,
      String nameserviceId, String nnId, String... keys) {
    for (String key : keys) {
      String value = conf.get(addKeySuffixes(key, nameserviceId, nnId));
      if (value != null) {
        conf.set(key, value);
        continue;
      }
      value = conf.get(addKeySuffixes(key, nameserviceId));
      if (value != null) {
        conf.set(key, value);
      }
    }
  }

  /** Return used as percentage of capacity. */
  public static float getPercentUsed(long used, long capacity) {
    return capacity <= 0 ? 100 : (used * 100.0f) / capacity;
  }

  /** Return remaining as percentage of capacity. */
  public static float getPercentRemaining(long remaining, long capacity) {
    return capacity <= 0 ? 0 : (remaining * 100.0f) / capacity;
  }
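  // Examples: getPercentUsed(50, 200) == 25.0f.  A non-positive capacity is
  // reported conservatively as 100% used and 0% remaining.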
  /** Convert percentage to a string. */
  public static String percent2String(double percentage) {
    return StringUtils.format("%.2f%%", percentage);
  }

  /**
   * Round bytes to GiB (gibibyte)
   * @param bytes number of bytes
   * @return number of GiB
   */
  public static int roundBytesToGB(long bytes) {
    return Math.round((float) bytes / 1024 / 1024 / 1024);
  }

  /** Create a {@link ClientDatanodeProtocol} proxy */
  public static ClientDatanodeProtocol createClientDatanodeProtocolProxy(
      DatanodeID datanodeid, Configuration conf, int socketTimeout,
      boolean connectToDnViaHostname, LocatedBlock locatedBlock)
      throws IOException {
    return new ClientDatanodeProtocolTranslatorPB(datanodeid, conf,
        socketTimeout, connectToDnViaHostname, locatedBlock);
  }

  /** Create {@link ClientDatanodeProtocol} proxy using kerberos ticket */
  public static ClientDatanodeProtocol createClientDatanodeProtocolProxy(
      DatanodeID datanodeid, Configuration conf, int socketTimeout,
      boolean connectToDnViaHostname) throws IOException {
    return new ClientDatanodeProtocolTranslatorPB(
        datanodeid, conf, socketTimeout, connectToDnViaHostname);
  }

  /** Create a {@link ClientDatanodeProtocol} proxy */
  public static ClientDatanodeProtocol createClientDatanodeProtocolProxy(
      InetSocketAddress addr, UserGroupInformation ticket, Configuration conf,
      SocketFactory factory) throws IOException {
    return new ClientDatanodeProtocolTranslatorPB(addr, ticket, conf, factory);
  }

  /**
   * Get nameservice Id for the {@link NameNode} based on namenode RPC address
   * matching the local node address.
   */
  public static String getNamenodeNameServiceId(Configuration conf) {
    return getNameServiceId(conf, DFS_NAMENODE_RPC_ADDRESS_KEY);
  }

  /**
   * Get nameservice Id for the BackupNode based on backup node RPC address
   * matching the local node address.
   */
  public static String getBackupNameServiceId(Configuration conf) {
    return getNameServiceId(conf, DFS_NAMENODE_BACKUP_ADDRESS_KEY);
  }

  /**
   * Get nameservice Id for the secondary node based on secondary http address
   * matching the local node address.
   */
  public static String getSecondaryNameServiceId(Configuration conf) {
    return getNameServiceId(conf, DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY);
  }
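  // The three getters above differ only in the address key matched against
  // the local host; for example, on a secondary namenode host the key
  // dfs.namenode.secondary.http-address(.<nsId>) is expected to point at
  // this machine.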
  /**
   * Get the nameservice Id by matching the {@code addressKey} with the
   * address of the local node.
   *
   * If {@link DFSConfigKeys#DFS_NAMESERVICE_ID} is not specifically
   * configured, and more than one nameservice Id is configured, this method
   * determines the nameservice Id by matching the local node's address with the
   * configured addresses.  When a match is found, it returns the nameservice Id
   * from the corresponding configuration key.
   *
   * @param conf Configuration
   * @param addressKey configuration key to get the address.
   * @return nameservice Id on success, null if federation is not configured.
   * @throws HadoopIllegalArgumentException on error
   */
  private static String getNameServiceId(Configuration conf, String addressKey) {
    String nameserviceId = conf.get(DFS_NAMESERVICE_ID);
    if (nameserviceId != null) {
      return nameserviceId;
    }
    Collection<String> nsIds = getNameServiceIds(conf);
    if (1 == nsIds.size()) {
      return nsIds.toArray(new String[1])[0];
    }
    String nnId = conf.get(DFS_HA_NAMENODE_ID_KEY);

    return getSuffixIDs(conf, addressKey, null, nnId, LOCAL_ADDRESS_MATCHER)[0];
  }

  /**
   * Returns nameservice Id and namenode Id when the local host matches the
   * configuration parameter {@code addressKey}.<nameservice Id>.<namenode Id>
   *
   * @param conf Configuration
   * @param addressKey configuration key corresponding to the address.
   * @param knownNsId only look at configs for the given nameservice, if not null
   * @param knownNNId only look at configs for the given namenode, if not null
   * @param matcher matching criteria for matching the address
   * @return Array with nameservice Id and namenode Id on success.  The first
   *         element in the array is the nameservice Id and the second element
   *         is the namenode Id.  A null value indicates that the configuration
   *         does not have the corresponding Id.
   * @throws HadoopIllegalArgumentException on error
   */
  static String[] getSuffixIDs(final Configuration conf, final String addressKey,
      String knownNsId, String knownNNId,
      final AddressMatcher matcher) {
    String nameserviceId = null;
    String namenodeId = null;
    int found = 0;

    Collection<String> nsIds = getNameServiceIds(conf);
    for (String nsId : emptyAsSingletonNull(nsIds)) {
      if (knownNsId != null && !knownNsId.equals(nsId)) {
        continue;
      }

      Collection<String> nnIds = getNameNodeIds(conf, nsId);
      for (String nnId : emptyAsSingletonNull(nnIds)) {
        if (LOG.isTraceEnabled()) {
          LOG.trace(String.format("addressKey: %s nsId: %s nnId: %s",
              addressKey, nsId, nnId));
        }
        if (knownNNId != null && !knownNNId.equals(nnId)) {
          continue;
        }
        String key = addKeySuffixes(addressKey, nsId, nnId);
        String addr = conf.get(key);
        if (addr == null) {
          continue;
        }
        InetSocketAddress s = null;
        try {
          s = NetUtils.createSocketAddr(addr);
        } catch (Exception e) {
          LOG.warn("Exception in creating socket address " + addr, e);
          continue;
        }
        if (!s.isUnresolved() && matcher.match(s)) {
          nameserviceId = nsId;
          namenodeId = nnId;
          found++;
        }
      }
    }
    if (found > 1) { // Only one address must match the local address
      String msg = "Configuration has multiple addresses that match "
          + "local node's address. Please configure the system with "
          + DFS_NAMESERVICE_ID + " and "
          + DFS_HA_NAMENODE_ID_KEY;
      throw new HadoopIllegalArgumentException(msg);
    }
    return new String[] { nameserviceId, namenodeId };
  }
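  // Illustrative: on the host that owns host1:8020 in the sample config
  // sketched earlier, getSuffixIDs(conf, DFS_NAMENODE_RPC_ADDRESS_KEY,
  // null, null, LOCAL_ADDRESS_MATCHER) returns { "ns1", "nn1" }.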
  /**
   * For a given set of {@code keys}, adds the nameservice Id and/or namenode
   * Id and returns {nameserviceId, namenodeId} when an address match is found.
   * @see #getSuffixIDs(Configuration, String, String, String, AddressMatcher)
   */
  static String[] getSuffixIDs(final Configuration conf,
      final InetSocketAddress address, final String... keys) {
    AddressMatcher matcher = new AddressMatcher() {
      @Override
      public boolean match(InetSocketAddress s) {
        return address.equals(s);
      }
    };

    for (String key : keys) {
      String[] ids = getSuffixIDs(conf, key, null, null, matcher);
      if (ids != null && (ids[0] != null || ids[1] != null)) {
        return ids;
      }
    }
    return null;
  }

  private interface AddressMatcher {
    public boolean match(InetSocketAddress s);
  }

  /** Create a URI from the scheme and address. */
  public static URI createUri(String scheme, InetSocketAddress address) {
    try {
      return new URI(scheme, null, address.getHostName(), address.getPort(),
          null, null, null);
    } catch (URISyntaxException ue) {
      throw new IllegalArgumentException(ue);
    }
  }

  /**
   * Add a protobuf-based protocol to the {@link org.apache.hadoop.ipc.RPC.Server}.
   * @param conf configuration
   * @param protocol Protocol interface
   * @param service service that implements the protocol
   * @param server RPC server to which the protocol and implementation are added
   * @throws IOException
   */
  public static void addPBProtocol(Configuration conf, Class<?> protocol,
      BlockingService service, RPC.Server server) throws IOException {
    RPC.setProtocolEngine(conf, protocol, ProtobufRpcEngine.class);
    server.addProtocol(RPC.RpcKind.RPC_PROTOCOL_BUFFER, protocol, service);
  }

  /**
   * Map a logical namenode ID to its service address.  Use the given
   * nameservice if specified, or the configured one if none is given.
   *
   * @param conf Configuration
   * @param nsId which nameservice nnId is a part of, optional
   * @param nnId the namenode ID to get the service addr for
   * @return the service addr, null if it could not be determined
   */
  public static String getNamenodeServiceAddr(final Configuration conf,
      String nsId, String nnId) {

    if (nsId == null) {
      nsId = getOnlyNameServiceIdOrNull(conf);
    }

    String serviceAddrKey = concatSuffixes(
        DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, nsId, nnId);

    String addrKey = concatSuffixes(
        DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY, nsId, nnId);

    String serviceRpcAddr = conf.get(serviceAddrKey);
    if (serviceRpcAddr == null) {
      serviceRpcAddr = conf.get(addrKey);
    }
    return serviceRpcAddr;
  }
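  // Illustrative: with the hypothetical ns1/nn1 config sketched earlier,
  // getNamenodeServiceAddr(conf, null, "nn1") resolves
  // dfs.namenode.servicerpc-address.ns1.nn1 ("host1:8021"), falling back to
  // dfs.namenode.rpc-address.ns1.nn1 when no service address is set.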
  /**
   * If the configuration refers to only a single nameservice, return the
   * name of that nameservice.  If it refers to 0 or more than 1, return null.
   */
  public static String getOnlyNameServiceIdOrNull(Configuration conf) {
    Collection<String> nsIds = getNameServiceIds(conf);
    if (1 == nsIds.size()) {
      return nsIds.toArray(new String[1])[0];
    } else {
      // No nameservice ID was given and more than one is configured
      return null;
    }
  }

  public static final Options helpOptions = new Options();
  public static final Option helpOpt = new Option("h", "help", false,
      "get help information");

  static {
    helpOptions.addOption(helpOpt);
  }

  /**
   * Parse the arguments for commands.
   *
   * @param args the arguments to be parsed
   * @param helpDescription help information to be printed out
   * @param out Printer
   * @param printGenericCommandUsage whether to print the
   *          generic command usage defined in ToolRunner
   * @return true when the argument matches the help option, false otherwise
   */
  public static boolean parseHelpArgument(String[] args,
      String helpDescription, PrintStream out, boolean printGenericCommandUsage) {
    if (args.length == 1) {
      try {
        CommandLineParser parser = new PosixParser();
        CommandLine cmdLine = parser.parse(helpOptions, args);
        if (cmdLine.hasOption(helpOpt.getOpt())
            || cmdLine.hasOption(helpOpt.getLongOpt())) {
          // should print out the help information
          out.println(helpDescription + "\n");
          if (printGenericCommandUsage) {
            ToolRunner.printGenericCommandUsage(out);
          }
          return true;
        }
      } catch (ParseException pe) {
        return false;
      }
    }
    return false;
  }

  /**
   * Get DFS_NAMENODE_INVALIDATE_WORK_PCT_PER_ITERATION from configuration.
   *
   * @param conf Configuration
   * @return Value of DFS_NAMENODE_INVALIDATE_WORK_PCT_PER_ITERATION
   */
  public static float getInvalidateWorkPctPerIteration(Configuration conf) {
    float blocksInvalidateWorkPct = conf.getFloat(
        DFSConfigKeys.DFS_NAMENODE_INVALIDATE_WORK_PCT_PER_ITERATION,
        DFSConfigKeys.DFS_NAMENODE_INVALIDATE_WORK_PCT_PER_ITERATION_DEFAULT);
    Preconditions.checkArgument(
        (blocksInvalidateWorkPct > 0 && blocksInvalidateWorkPct <= 1.0f),
        DFSConfigKeys.DFS_NAMENODE_INVALIDATE_WORK_PCT_PER_ITERATION +
        " = '" + blocksInvalidateWorkPct + "' is invalid. " +
        "It should be a positive, non-zero float value, not greater than 1.0f, " +
        "to indicate a percentage.");
    return blocksInvalidateWorkPct;
  }

  /**
   * Get DFS_NAMENODE_REPLICATION_WORK_MULTIPLIER_PER_ITERATION from
   * configuration.
   *
   * @param conf Configuration
   * @return Value of DFS_NAMENODE_REPLICATION_WORK_MULTIPLIER_PER_ITERATION
   */
  public static int getReplWorkMultiplier(Configuration conf) {
    int blocksReplWorkMultiplier = conf.getInt(
        DFSConfigKeys.DFS_NAMENODE_REPLICATION_WORK_MULTIPLIER_PER_ITERATION,
        DFSConfigKeys.DFS_NAMENODE_REPLICATION_WORK_MULTIPLIER_PER_ITERATION_DEFAULT);
    Preconditions.checkArgument(
        (blocksReplWorkMultiplier > 0),
        DFSConfigKeys.DFS_NAMENODE_REPLICATION_WORK_MULTIPLIER_PER_ITERATION +
        " = '" + blocksReplWorkMultiplier + "' is invalid. " +
        "It should be a positive, non-zero integer value.");
    return blocksReplWorkMultiplier;
  }

  /**
   * Get the SPNEGO keytab Key from configuration.
   *
   * @param conf Configuration
   * @param defaultKey default key to be used for config lookup
   * @return DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY if the key is not empty,
   *         else return defaultKey
   */
  public static String getSpnegoKeytabKey(Configuration conf, String defaultKey) {
    String value =
        conf.get(DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY);
    return (value == null || value.isEmpty()) ?
        defaultKey : DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY;
  }

  /**
   * Get the http policy.  The http policy is chosen as follows:
   * <ol>
   * <li>If hadoop.ssl.enabled is set, http endpoints are not started.  Only
   * https endpoints are started on configured https ports.</li>
   * <li>This configuration is overridden by the dfs.https.enable configuration,
   * if it is set to true.  In that case, both http and https endpoints are
   * started.</li>
   * <li>All the above configurations are overridden by the dfs.http.policy
   * configuration.  With this configuration you can set http-only, https-only
   * and http-and-https endpoints.</li>
   * </ol>
   * See the hdfs-default.xml documentation for more details on each of the
   * above configuration settings.
   */
  public static HttpConfig.Policy getHttpPolicy(Configuration conf) {
    String policyStr = conf.get(DFSConfigKeys.DFS_HTTP_POLICY_KEY);
    if (policyStr == null) {
      boolean https = conf.getBoolean(DFSConfigKeys.DFS_HTTPS_ENABLE_KEY,
          DFSConfigKeys.DFS_HTTPS_ENABLE_DEFAULT);

      boolean hadoopSsl = conf.getBoolean(
          CommonConfigurationKeys.HADOOP_SSL_ENABLED_KEY,
          CommonConfigurationKeys.HADOOP_SSL_ENABLED_DEFAULT);

      if (hadoopSsl) {
        LOG.warn(CommonConfigurationKeys.HADOOP_SSL_ENABLED_KEY
            + " is deprecated. Please use " + DFSConfigKeys.DFS_HTTP_POLICY_KEY
            + ".");
      }
      if (https) {
        LOG.warn(DFSConfigKeys.DFS_HTTPS_ENABLE_KEY
            + " is deprecated. Please use " + DFSConfigKeys.DFS_HTTP_POLICY_KEY
            + ".");
      }

      return (hadoopSsl || https) ? HttpConfig.Policy.HTTP_AND_HTTPS
          : HttpConfig.Policy.HTTP_ONLY;
    }

    HttpConfig.Policy policy = HttpConfig.Policy.fromString(policyStr);
    if (policy == null) {
      throw new HadoopIllegalArgumentException("Unrecognized value '"
          + policyStr + "' for " + DFSConfigKeys.DFS_HTTP_POLICY_KEY);
    }

    conf.set(DFSConfigKeys.DFS_HTTP_POLICY_KEY, policy.name());
    return policy;
  }
   */
  public static Configuration loadSslConfiguration(Configuration conf) {
    Configuration sslConf = new Configuration(false);

    sslConf.addResource(conf.get(
        DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY,
        DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_DEFAULT));

    boolean requireClientAuth = conf.getBoolean(DFS_CLIENT_HTTPS_NEED_AUTH_KEY,
        DFS_CLIENT_HTTPS_NEED_AUTH_DEFAULT);
    sslConf.setBoolean(DFS_CLIENT_HTTPS_NEED_AUTH_KEY, requireClientAuth);
    return sslConf;
  }

  /**
   * Return an HttpServer2.Builder that the journalnode / namenode / secondary
   * namenode can use to initialize their HTTP / HTTPS server.
   */
  public static HttpServer2.Builder httpServerTemplateForNNAndJN(
      Configuration conf, final InetSocketAddress httpAddr,
      final InetSocketAddress httpsAddr, String name, String spnegoUserNameKey,
      String spnegoKeytabFileKey) throws IOException {
    HttpConfig.Policy policy = getHttpPolicy(conf);

    HttpServer2.Builder builder = new HttpServer2.Builder().setName(name)
        .setConf(conf).setACL(new AccessControlList(conf.get(DFS_ADMIN, " ")))
        .setSecurityEnabled(UserGroupInformation.isSecurityEnabled())
        .setUsernameConfKey(spnegoUserNameKey)
        .setKeytabConfKey(getSpnegoKeytabKey(conf, spnegoKeytabFileKey));

    // initialize the webserver for uploading/downloading files.
    if (UserGroupInformation.isSecurityEnabled()) {
      LOG.info("Starting web server as: "
          + SecurityUtil.getServerPrincipal(conf.get(spnegoUserNameKey),
              httpAddr.getHostName()));
    }

    if (policy.isHttpEnabled()) {
      if (httpAddr.getPort() == 0) {
        builder.setFindPort(true);
      }

      URI uri = URI.create("http://" + NetUtils.getHostPortString(httpAddr));
      builder.addEndpoint(uri);
      LOG.info("Starting Web-server for " + name + " at: " + uri);
    }

    if (policy.isHttpsEnabled() && httpsAddr != null) {
      Configuration sslConf = loadSslConfiguration(conf);
      loadSslConfToHttpServerBuilder(builder, sslConf);

      if (httpsAddr.getPort() == 0) {
        builder.setFindPort(true);
      }

      URI uri = URI.create("https://" + NetUtils.getHostPortString(httpsAddr));
      builder.addEndpoint(uri);
      LOG.info("Starting Web-server for " + name + " at: " + uri);
    }
    return builder;
  }

  /**
   * Leverages the Configuration.getPassword method to attempt to get
   * passwords from the CredentialProvider API before falling back to
   * clear text in config, if falling back is allowed.
   * @param conf Configuration instance
   * @param alias name of the credential to retrieve
   * @return String credential value or null
   */
  static String getPassword(Configuration conf, String alias) {
    String password = null;
    try {
      char[] passchars = conf.getPassword(alias);
      if (passchars != null) {
        password = new String(passchars);
      }
    } catch (IOException ioe) {
      password = null;
    }
    return password;
  }

  /**
   * Converts a Date into an ISO-8601 formatted datetime string.
   */
  public static String dateToIso8601String(Date date) {
    SimpleDateFormat df =
        new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ssZ", Locale.ENGLISH);
    return df.format(date);
  }

  /**
   * Converts a time duration in milliseconds into DDD:HH:MM:SS.mmm format.
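   *
   * <p>A worked example against the format string in the body below (values
   * computed by hand):
   * <pre>{@code
   * // 1 day, 2 hours, 3 minutes, 4 seconds and 5 ms = 93784005 ms
   * DFSUtil.durationToString(93784005L); // "001:02:03:04.005"
   * DFSUtil.durationToString(-1000L);    // "-000:00:00:01.000"
   * }</pre>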
   */
  public static String durationToString(long durationMs) {
    boolean negative = false;
    if (durationMs < 0) {
      negative = true;
      durationMs = -durationMs;
    }
    // Chop off the milliseconds
    long durationSec = durationMs / 1000;
    final int secondsPerMinute = 60;
    final int secondsPerHour = 60*60;
    final int secondsPerDay = 60*60*24;
    final long days = durationSec / secondsPerDay;
    durationSec -= days * secondsPerDay;
    final long hours = durationSec / secondsPerHour;
    durationSec -= hours * secondsPerHour;
    final long minutes = durationSec / secondsPerMinute;
    durationSec -= minutes * secondsPerMinute;
    final long seconds = durationSec;
    final long milliseconds = durationMs % 1000;
    String format = "%03d:%02d:%02d:%02d.%03d";
    if (negative) {
      format = "-" + format;
    }
    return String.format(format, days, hours, minutes, seconds, milliseconds);
  }

  /**
   * Converts a relative time string into a duration in milliseconds.
   */
  public static long parseRelativeTime(String relTime) throws IOException {
    if (relTime.length() < 2) {
      throw new IOException("Unable to parse relative time value of " + relTime
          + ": too short");
    }
    String ttlString = relTime.substring(0, relTime.length()-1);
    long ttl;
    try {
      ttl = Long.parseLong(ttlString);
    } catch (NumberFormatException e) {
      throw new IOException("Unable to parse relative time value of " + relTime
          + ": " + ttlString + " is not a number");
    }
    if (relTime.endsWith("s")) {
      // pass
    } else if (relTime.endsWith("m")) {
      ttl *= 60;
    } else if (relTime.endsWith("h")) {
      ttl *= 60*60;
    } else if (relTime.endsWith("d")) {
      ttl *= 60*60*24;
    } else {
      throw new IOException("Unable to parse relative time value of " + relTime
          + ": unknown time unit " + relTime.charAt(relTime.length() - 1));
    }
    return ttl*1000;
  }

  /**
   * Assert that all objects in the collection are equal. Returns silently if
   * so, throws an AssertionError if any object is not equal. All null values
   * are considered equal.
   *
   * @param objects the collection of objects to check for equality.
   */
  public static void assertAllResultsEqual(Collection<?> objects)
      throws AssertionError {
    if (objects.size() == 0 || objects.size() == 1)
      return;

    Object[] resultsArray = objects.toArray();
    for (int i = 1; i < resultsArray.length; i++) {
      Object currElement = resultsArray[i];
      Object lastElement = resultsArray[i - 1];
      if ((currElement == null && currElement != lastElement) ||
          (currElement != null && !currElement.equals(lastElement))) {
        throw new AssertionError("Not all elements match in results: " +
            Arrays.toString(resultsArray));
      }
    }
  }

  /**
   * Creates a new KeyProvider from the given Configuration.
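   *
   * <p>A sketch of the expected wiring (the KMS address is illustrative
   * only, not a working endpoint):
   * <pre>{@code
   * conf.set(DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI,
   *     "kms://http@kms.example.com:16000/kms");
   * KeyProvider provider = DFSUtil.createKeyProvider(conf);
   * // provider is null when the key is unset or empty
   * }</pre>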
   *
   * @param conf Configuration
   * @return new KeyProvider, or null if no provider was found.
   * @throws IOException if the KeyProvider is improperly specified in
   *                     the Configuration
   */
  public static KeyProvider createKeyProvider(
      final Configuration conf) throws IOException {
    final String providerUriStr =
        conf.getTrimmed(DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI, "");
    // No provider set in conf
    if (providerUriStr.isEmpty()) {
      return null;
    }
    final URI providerUri;
    try {
      providerUri = new URI(providerUriStr);
    } catch (URISyntaxException e) {
      throw new IOException(e);
    }
    KeyProvider keyProvider = KeyProviderFactory.get(providerUri, conf);
    if (keyProvider == null) {
      throw new IOException("Could not instantiate KeyProvider from " +
          DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI + " setting of '" +
          providerUriStr + "'");
    }
    if (keyProvider.isTransient()) {
      throw new IOException("KeyProvider " + keyProvider.toString()
          + " was found but it is a transient provider.");
    }
    return keyProvider;
  }

  /**
   * Creates a new KeyProviderCryptoExtension by wrapping the
   * KeyProvider specified in the given Configuration.
   *
   * @param conf Configuration
   * @return new KeyProviderCryptoExtension, or null if no provider was found.
   * @throws IOException if the KeyProvider is improperly specified in
   *                     the Configuration
   */
  public static KeyProviderCryptoExtension createKeyProviderCryptoExtension(
      final Configuration conf) throws IOException {
    KeyProvider keyProvider = createKeyProvider(conf);
    if (keyProvider == null) {
      return null;
    }
    KeyProviderCryptoExtension cryptoProvider = KeyProviderCryptoExtension
        .createKeyProviderCryptoExtension(keyProvider);
    return cryptoProvider;
  }

  /**
   * Probe for HDFS encryption being enabled; this uses the value of
   * the option {@link DFSConfigKeys#DFS_ENCRYPTION_KEY_PROVIDER_URI},
   * returning true if that property contains a non-empty, non-whitespace
   * string.
   * @param conf configuration to probe
   * @return true if encryption is considered enabled.
   */
  public static boolean isHDFSEncryptionEnabled(Configuration conf) {
    return !conf.getTrimmed(
        DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI, "").isEmpty();
  }
}
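
// A hedged usage sketch for the encryption helpers above:
// isHDFSEncryptionEnabled() merely probes whether the key-provider URI is
// set, while createKeyProviderCryptoExtension() actually instantiates and
// wraps the provider, so it can throw IOException on a misconfigured URI.
//
//   Configuration conf = new Configuration();
//   DFSUtil.isHDFSEncryptionEnabled(conf);          // false: URI unset
//   DFSUtil.createKeyProviderCryptoExtension(conf); // null for the same reason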