/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hdfs;

import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
import static org.apache.hadoop.hdfs.protocol.HdfsConstants.HA_DT_SERVICE_PREFIX;

import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Map;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSelector;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;

import com.google.common.base.Joiner;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;

public class HAUtil {

  private static final Log LOG =
      LogFactory.getLog(HAUtil.class);

  private static final DelegationTokenSelector tokenSelector =
      new DelegationTokenSelector();

  private HAUtil() { /* Hidden constructor */ }

  /**
   * Returns true if HA for namenode is configured for the given nameservice
   *
   * @param conf Configuration
   * @param nsId nameservice, or null if no federated NS is configured
   * @return true if HA is configured in the configuration; else false.
   */
  public static boolean isHAEnabled(Configuration conf, String nsId) {
    Map<String, Map<String, InetSocketAddress>> addresses =
        DFSUtil.getHaNnRpcAddresses(conf);
    if (addresses == null) return false;
    Map<String, InetSocketAddress> nnMap = addresses.get(nsId);
    return nnMap != null && nnMap.size() > 1;
  }

  /**
   * Returns true if HA is using a shared edits directory.
   *
   * @param conf Configuration
   * @return true if HA config is using a shared edits dir, false otherwise.
   */
  public static boolean usesSharedEditsDir(Configuration conf) {
    return null != conf.get(DFS_NAMENODE_SHARED_EDITS_DIR_KEY);
  }
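
  /*
   * Illustrative usage sketch, not part of the original class: given an HA
   * nameservice configured through the usual keys (the namenode list under
   * DFS_HA_NAMENODES_KEY_PREFIX plus per-namenode RPC addresses under
   * DFS_NAMENODE_RPC_ADDRESS_KEY), a caller might probe the local HA setup
   * roughly like this:
   *
   *   Configuration conf = new HdfsConfiguration();
   *   String nsId = DFSUtil.getNamenodeNameServiceId(conf);
   *   if (HAUtil.isHAEnabled(conf, nsId)) {
   *     String nnId = HAUtil.getNameNodeId(conf, nsId);
   *     // nnId identifies which of the configured namenodes this node is
   *   }
   */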

  /**
   * Get the namenode Id by matching the {@code addressKey}
   * with the address of the local node.
   *
   * If {@link DFSConfigKeys#DFS_HA_NAMENODE_ID_KEY} is not specifically
   * configured, this method determines the namenode Id by matching the local
   * node's address with the configured addresses. When a match is found, it
   * returns the namenode Id from the corresponding configuration key.
   *
   * @param conf Configuration
   * @param nsId nameservice Id, or null if none is configured
   * @return namenode Id on success, null on failure.
   * @throws HadoopIllegalArgumentException on error
   */
  public static String getNameNodeId(Configuration conf, String nsId) {
    String namenodeId = conf.getTrimmed(DFS_HA_NAMENODE_ID_KEY);
    if (namenodeId != null) {
      return namenodeId;
    }

    String[] suffixes = DFSUtil.getSuffixIDs(conf, DFS_NAMENODE_RPC_ADDRESS_KEY,
        nsId, null, DFSUtil.LOCAL_ADDRESS_MATCHER);
    if (suffixes == null) {
      String msg = "Configuration " + DFS_NAMENODE_RPC_ADDRESS_KEY +
          " must be suffixed with nameservice and namenode ID for HA " +
          "configuration.";
      throw new HadoopIllegalArgumentException(msg);
    }

    // getSuffixIDs returns {nameserviceId, namenodeId}.
    return suffixes[1];
  }

  /**
   * Similar to
   * {@link DFSUtil#getNameServiceIdFromAddress(Configuration,
   * InetSocketAddress, String...)}
   */
  public static String getNameNodeIdFromAddress(final Configuration conf,
      final InetSocketAddress address, String... keys) {
    // Configuration with a single namenode and no nameserviceId
    String[] ids = DFSUtil.getSuffixIDs(conf, address, keys);
    if (ids != null && ids.length > 1) {
      return ids[1];
    }
    return null;
  }

  /**
   * Given the configuration for this node, return a Configuration object for
   * the other node in an HA setup.
   *
   * @param myConf the configuration of this node
   * @return the configuration of the other node in an HA setup
   */
  public static Configuration getConfForOtherNode(
      Configuration myConf) {

    String nsId = DFSUtil.getNamenodeNameServiceId(myConf);
    Preconditions.checkArgument(nsId != null,
        "Could not determine namespace id. Please ensure that this " +
        "machine is one of the machines listed as a NN RPC address, " +
        "or configure " + DFSConfigKeys.DFS_FEDERATION_NAMESERVICE_ID);

    Collection<String> nnIds = DFSUtil.getNameNodeIds(myConf, nsId);
    String myNNId = myConf.get(DFSConfigKeys.DFS_HA_NAMENODE_ID_KEY);
    Preconditions.checkArgument(nnIds != null,
        "Could not determine namenode ids in namespace '%s'. " +
        "Please configure " +
        DFSUtil.addKeySuffixes(DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX,
            nsId),
        nsId);
    Preconditions.checkArgument(nnIds.size() == 2,
        "Expected exactly 2 NameNodes in namespace '%s'. " +
        "Instead, got only %s (NN ids were '%s')",
        nsId, nnIds.size(), Joiner.on("','").join(nnIds));
    Preconditions.checkState(myNNId != null && !myNNId.isEmpty(),
        "Could not determine own NN ID in namespace '%s'. Please " +
        "ensure that this node is one of the machines listed as an " +
        "NN RPC address, or configure " + DFSConfigKeys.DFS_HA_NAMENODE_ID_KEY,
        nsId);

    ArrayList<String> nnSet = Lists.newArrayList(nnIds);
    nnSet.remove(myNNId);
    assert nnSet.size() == 1;
    String activeNN = nnSet.get(0);

    // Look up the address of the active NN.
    Configuration confForOtherNode = new Configuration(myConf);
    NameNode.initializeGenericKeys(confForOtherNode, nsId, activeNN);
    return confForOtherNode;
  }
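
  /*
   * Illustrative sketch, not part of the original class: a typical caller
   * (e.g. a tool bootstrapping the standby) uses the returned Configuration
   * to locate the peer NameNode. NameNode.getServiceAddress(...) below is
   * assumed to be the usual helper in this codebase for turning a
   * configuration into an RPC address:
   *
   *   Configuration otherConf = HAUtil.getConfForOtherNode(myConf);
   *   InetSocketAddress otherIpcAddr =
   *       NameNode.getServiceAddress(otherConf, true);
   */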

  /**
   * This is used only by tests at the moment.
   * @return true if the NN should allow read operations while in standby mode.
   */
  public static boolean shouldAllowStandbyReads(Configuration conf) {
    return conf.getBoolean("dfs.ha.allow.stale.reads", false);
  }

  public static void setAllowStandbyReads(Configuration conf, boolean val) {
    conf.setBoolean("dfs.ha.allow.stale.reads", val);
  }

  /**
   * @return true if the given nameNodeUri appears to be a logical URI.
   * This is the case if there is a failover proxy provider configured
   * for it in the given configuration.
   */
  public static boolean isLogicalUri(
      Configuration conf, URI nameNodeUri) {
    String host = nameNodeUri.getHost();
    String configKey = DFS_CLIENT_FAILOVER_PROXY_PROVIDER_KEY_PREFIX + "."
        + host;
    return conf.get(configKey) != null;
  }

  /**
   * Parse the HDFS URI out of the provided token.
   * @throws IOException if the token is invalid
   */
  public static URI getServiceUriFromToken(
      Token<DelegationTokenIdentifier> token)
      throws IOException {
    String tokStr = token.getService().toString();

    if (tokStr.startsWith(HA_DT_SERVICE_PREFIX)) {
      tokStr = tokStr.replaceFirst(HA_DT_SERVICE_PREFIX, "");
    }

    try {
      return new URI(HdfsConstants.HDFS_URI_SCHEME + "://" +
          tokStr);
    } catch (URISyntaxException e) {
      throw new IOException("Invalid token contents: '" +
          tokStr + "'", e);
    }
  }

  /**
   * Get the service name used in the delegation token for the given logical
   * HA service.
   * @param uri the logical URI of the cluster
   * @return the service name
   */
  public static Text buildTokenServiceForLogicalUri(URI uri) {
    return new Text(HA_DT_SERVICE_PREFIX + uri.getHost());
  }

  /**
   * @return true if this token corresponds to a logical nameservice
   * rather than a specific namenode.
   */
  public static boolean isTokenForLogicalUri(
      Token<DelegationTokenIdentifier> token) {
    return token.getService().toString().startsWith(HA_DT_SERVICE_PREFIX);
  }

  /**
   * Locate a delegation token associated with the given HA cluster URI, and if
   * one is found, clone it to also represent the underlying namenode address.
   * @param ugi the UGI to modify
   * @param haUri the logical URI for the cluster
   * @param nnAddrs collection of NNs in the cluster to which the token
   * applies
   */
  public static void cloneDelegationTokenForLogicalUri(
      UserGroupInformation ugi, URI haUri,
      Collection<InetSocketAddress> nnAddrs) {
    Text haService = HAUtil.buildTokenServiceForLogicalUri(haUri);
    Token<DelegationTokenIdentifier> haToken =
        tokenSelector.selectToken(haService, ugi.getTokens());
    if (haToken != null) {
      for (InetSocketAddress singleNNAddr : nnAddrs) {
        Token<DelegationTokenIdentifier> specificToken =
            new Token<DelegationTokenIdentifier>(haToken);
        SecurityUtil.setTokenService(specificToken, singleNNAddr);
        ugi.addToken(specificToken);
        LOG.debug("Mapped HA service delegation token for logical URI " +
            haUri + " to namenode " + singleNNAddr);
      }
    } else {
      LOG.debug("No HA service delegation token found for logical URI " +
          haUri);
    }
  }
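
  /*
   * Illustrative sketch, not part of the original class, using a hypothetical
   * logical URI "hdfs://mycluster": buildTokenServiceForLogicalUri() yields a
   * token service of HA_DT_SERVICE_PREFIX + "mycluster", and
   * cloneDelegationTokenForLogicalUri() copies the matching token once per
   * physical namenode address so that code keyed on concrete NN addresses can
   * still find it:
   *
   *   URI haUri = URI.create("hdfs://mycluster");
   *   Text service = HAUtil.buildTokenServiceForLogicalUri(haUri);
   *   // later, once the physical NN addresses (nnAddrs) are known:
   *   HAUtil.cloneDelegationTokenForLogicalUri(ugi, haUri, nnAddrs);
   */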

  /**
   * Get the internet address of the currently-active NN. This should rarely be
   * used, since callers of this method who connect directly to the NN using the
   * resulting InetSocketAddress will not be able to connect to the active NN if
   * a failover were to occur after this method has been called.
   *
   * @param fs the file system to get the active address of.
   * @return the internet address of the currently-active NN.
   * @throws IOException if an error occurs while resolving the active NN.
   */
  @SuppressWarnings("deprecation")
  public static InetSocketAddress getAddressOfActive(FileSystem fs)
      throws IOException {
    if (!(fs instanceof DistributedFileSystem)) {
      throw new IllegalArgumentException("FileSystem " + fs + " is not a DFS.");
    }
    // force client address resolution.
    fs.exists(new Path("/"));
    DistributedFileSystem dfs = (DistributedFileSystem) fs;
    DFSClient dfsClient = dfs.getClient();
    return RPC.getServerAddress(dfsClient.getNamenode());
  }
}
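
// Illustrative usage sketch, not part of the original file, assuming the
// default FileSystem is an HA-enabled DistributedFileSystem:
//
//   FileSystem fs = FileSystem.get(conf);
//   InetSocketAddress active = HAUtil.getAddressOfActive(fs);
//   // 'active' points at whichever NN is active right now; it can go stale
//   // after a failover, as the javadoc above warns.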