/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hdfs;

import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_FAILOVER_MAX_ATTEMPTS_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_FAILOVER_MAX_ATTEMPTS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_FAILOVER_PROXY_PROVIDER_KEY_PREFIX;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_FAILOVER_SLEEPTIME_BASE_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_FAILOVER_SLEEPTIME_BASE_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_FAILOVER_SLEEPTIME_MAX_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_FAILOVER_SLEEPTIME_MAX_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_RETRY_MAX_ATTEMPTS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_RETRY_MAX_ATTEMPTS_DEFAULT;

import java.io.IOException;
import java.lang.reflect.Constructor;
import java.lang.reflect.InvocationHandler;
import java.lang.reflect.Proxy;
import java.net.InetSocketAddress;
import java.net.URI;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.TimeUnit;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSClient.Conf;
import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolPB;
import org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB;
import org.apache.hadoop.hdfs.protocolPB.JournalProtocolPB;
import org.apache.hadoop.hdfs.protocolPB.JournalProtocolTranslatorPB;
import org.apache.hadoop.hdfs.protocolPB.NamenodeProtocolPB;
import org.apache.hadoop.hdfs.protocolPB.NamenodeProtocolTranslatorPB;
import org.apache.hadoop.hdfs.server.namenode.ha.AbstractNNFailoverProxyProvider;
import org.apache.hadoop.hdfs.server.namenode.ha.WrappedFailoverProxyProvider;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
import org.apache.hadoop.hdfs.server.protocol.JournalProtocol;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.retry.DefaultFailoverProxyProvider;
import org.apache.hadoop.io.retry.FailoverProxyProvider;
import org.apache.hadoop.io.retry.LossyRetryInvocationHandler;
import org.apache.hadoop.io.retry.RetryPolicies;
import org.apache.hadoop.io.retry.RetryPolicy;
import org.apache.hadoop.io.retry.RetryProxy;
import org.apache.hadoop.io.retry.RetryUtils;
import org.apache.hadoop.ipc.ProtobufRpcEngine;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.RefreshUserMappingsProtocol;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authorize.RefreshAuthorizationPolicyProtocol;
import org.apache.hadoop.security.protocolPB.RefreshAuthorizationPolicyProtocolClientSideTranslatorPB;
import org.apache.hadoop.security.protocolPB.RefreshAuthorizationPolicyProtocolPB;
import org.apache.hadoop.security.protocolPB.RefreshUserMappingsProtocolClientSideTranslatorPB;
import org.apache.hadoop.security.protocolPB.RefreshUserMappingsProtocolPB;
import org.apache.hadoop.ipc.RefreshCallQueueProtocol;
import org.apache.hadoop.ipc.protocolPB.RefreshCallQueueProtocolPB;
import org.apache.hadoop.ipc.protocolPB.RefreshCallQueueProtocolClientSideTranslatorPB;
import org.apache.hadoop.tools.GetUserMappingsProtocol;
import org.apache.hadoop.tools.protocolPB.GetUserMappingsProtocolClientSideTranslatorPB;
import org.apache.hadoop.tools.protocolPB.GetUserMappingsProtocolPB;

import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;

/**
 * Create proxy objects to communicate with a remote NN. All remote access to an
 * NN should be funneled through this class. Most of the time you'll want to use
 * {@link NameNodeProxies#createProxy(Configuration, URI, Class)}, which will
 * create either an HA- or non-HA-enabled client proxy as appropriate.
 */
public class NameNodeProxies {

  private static final Log LOG = LogFactory.getLog(NameNodeProxies.class);

  /**
   * Wrapper for a client proxy as well as its associated service ID.
   * This is simply used as a tuple-like return type for
   * {@link NameNodeProxies#createProxy} and
   * {@link NameNodeProxies#createNonHAProxy}.
   */
  public static class ProxyAndInfo<PROXYTYPE> {
    private final PROXYTYPE proxy;
    private final Text dtService;
    private final InetSocketAddress address;

    public ProxyAndInfo(PROXYTYPE proxy, Text dtService,
        InetSocketAddress address) {
      this.proxy = proxy;
      this.dtService = dtService;
      this.address = address;
    }

    public PROXYTYPE getProxy() {
      return proxy;
    }

    public Text getDelegationTokenService() {
      return dtService;
    }

    public InetSocketAddress getAddress() {
      return address;
    }
  }

  /**
   * Creates the namenode proxy with the passed protocol. This will handle
   * creation of either HA- or non-HA-enabled proxy objects, depending on
   * whether the provided URI is a configured logical URI.
   *
   * @param conf the configuration containing the required IPC
   *        properties, client failover configurations, etc.
   * @param nameNodeUri the URI pointing either to a specific NameNode
   *        or to a logical nameservice.
   * @param xface the IPC interface which should be created
   * @return an object containing both the proxy and the associated
   *         delegation token service it corresponds to
   * @throws IOException if there is an error creating the proxy
   */
  @SuppressWarnings("unchecked")
  public static <T> ProxyAndInfo<T> createProxy(Configuration conf,
      URI nameNodeUri, Class<T> xface) throws IOException {
    AbstractNNFailoverProxyProvider<T> failoverProxyProvider =
        createFailoverProxyProvider(conf, nameNodeUri, xface, true);

    if (failoverProxyProvider == null) {
      // Non-HA case
      return createNonHAProxy(conf, NameNode.getAddress(nameNodeUri), xface,
          UserGroupInformation.getCurrentUser(), true);
    } else {
      // HA case
      Conf config = new Conf(conf);
      T proxy = (T) RetryProxy.create(xface, failoverProxyProvider,
          RetryPolicies.failoverOnNetworkException(
              RetryPolicies.TRY_ONCE_THEN_FAIL, config.maxFailoverAttempts,
              config.maxRetryAttempts, config.failoverSleepBaseMillis,
              config.failoverSleepMaxMillis));

      Text dtService;
      if (failoverProxyProvider.useLogicalURI()) {
        dtService = HAUtil.buildTokenServiceForLogicalUri(nameNodeUri);
      } else {
        dtService = SecurityUtil.buildTokenService(
            NameNode.getAddress(nameNodeUri));
      }
      return new ProxyAndInfo<T>(proxy, dtService,
          NameNode.getAddress(nameNodeUri));
    }
  }
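
  // A minimal usage sketch: most callers obtain an HA-aware ClientProtocol
  // proxy through createProxy(). The logical URI "hdfs://mycluster" below is
  // a hypothetical nameservice assumed to have a failover proxy provider
  // configured; it is shown for illustration only.
  //
  //   Configuration conf = new HdfsConfiguration();
  //   ProxyAndInfo<ClientProtocol> proxyInfo = NameNodeProxies.createProxy(
  //       conf, URI.create("hdfs://mycluster"), ClientProtocol.class);
  //   ClientProtocol namenode = proxyInfo.getProxy();
  //   // RPCs made through 'namenode' now retry and fail over between
  //   // NameNodes according to the configured failover/retry policies.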

  /**
   * Generate a dummy namenode proxy instance that utilizes our hacked
   * {@link LossyRetryInvocationHandler}. A proxy instance generated using this
   * method will proactively drop RPC responses. Currently this method only
   * supports an HA setup; null will be returned if the given configuration is
   * not for HA.
   *
   * @param config the configuration containing the required IPC
   *        properties, client failover configurations, etc.
   * @param nameNodeUri the URI pointing either to a specific NameNode
   *        or to a logical nameservice.
   * @param xface the IPC interface which should be created
   * @param numResponseToDrop The number of responses to drop for each RPC call
   * @return an object containing both the proxy and the associated
   *         delegation token service it corresponds to. Will return null if
   *         the given configuration does not support HA.
   * @throws IOException if there is an error creating the proxy
   */
  @SuppressWarnings("unchecked")
  public static <T> ProxyAndInfo<T> createProxyWithLossyRetryHandler(
      Configuration config, URI nameNodeUri, Class<T> xface,
      int numResponseToDrop) throws IOException {
    Preconditions.checkArgument(numResponseToDrop > 0);
    AbstractNNFailoverProxyProvider<T> failoverProxyProvider =
        createFailoverProxyProvider(config, nameNodeUri, xface, true);

    if (failoverProxyProvider != null) { // HA case
      int delay = config.getInt(
          DFS_CLIENT_FAILOVER_SLEEPTIME_BASE_KEY,
          DFS_CLIENT_FAILOVER_SLEEPTIME_BASE_DEFAULT);
      int maxCap = config.getInt(
          DFS_CLIENT_FAILOVER_SLEEPTIME_MAX_KEY,
          DFS_CLIENT_FAILOVER_SLEEPTIME_MAX_DEFAULT);
      int maxFailoverAttempts = config.getInt(
          DFS_CLIENT_FAILOVER_MAX_ATTEMPTS_KEY,
          DFS_CLIENT_FAILOVER_MAX_ATTEMPTS_DEFAULT);
      int maxRetryAttempts = config.getInt(
          DFS_CLIENT_RETRY_MAX_ATTEMPTS_KEY,
          DFS_CLIENT_RETRY_MAX_ATTEMPTS_DEFAULT);
      InvocationHandler dummyHandler = new LossyRetryInvocationHandler<T>(
          numResponseToDrop, failoverProxyProvider,
          RetryPolicies.failoverOnNetworkException(
              RetryPolicies.TRY_ONCE_THEN_FAIL, maxFailoverAttempts,
              Math.max(numResponseToDrop + 1, maxRetryAttempts), delay,
              maxCap));

      T proxy = (T) Proxy.newProxyInstance(
          failoverProxyProvider.getInterface().getClassLoader(),
          new Class[] { xface }, dummyHandler);
      Text dtService;
      if (failoverProxyProvider.useLogicalURI()) {
        dtService = HAUtil.buildTokenServiceForLogicalUri(nameNodeUri);
      } else {
        dtService = SecurityUtil.buildTokenService(
            NameNode.getAddress(nameNodeUri));
      }
      return new ProxyAndInfo<T>(proxy, dtService,
          NameNode.getAddress(nameNodeUri));
    } else {
      LOG.warn("Currently creating proxy using " +
          "LossyRetryInvocationHandler requires NN HA setup");
      return null;
    }
  }
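
  // A minimal sketch of how the lossy handler is typically exercised in
  // tests of the NameNode retry path; the URI "hdfs://mycluster" and the
  // drop count of 2 are illustrative assumptions.
  //
  //   ProxyAndInfo<ClientProtocol> lossy =
  //       NameNodeProxies.createProxyWithLossyRetryHandler(
  //           conf, URI.create("hdfs://mycluster"), ClientProtocol.class, 2);
  //   // For each RPC issued via lossy.getProxy(), the first two responses
  //   // are dropped, forcing the client onto its retry logic.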

  /**
   * Creates an explicitly non-HA-enabled proxy object. Most of the time you
   * don't want to use this, and should instead use
   * {@link NameNodeProxies#createProxy}.
   *
   * @param conf the configuration object
   * @param nnAddr address of the remote NN to connect to
   * @param xface the IPC interface which should be created
   * @param ugi the user who is making the calls on the proxy object
   * @param withRetries certain interfaces have a non-standard retry policy
   * @return an object containing both the proxy and the associated
   *         delegation token service it corresponds to
   * @throws IOException if there is an error creating the proxy
   */
  @SuppressWarnings("unchecked")
  public static <T> ProxyAndInfo<T> createNonHAProxy(
      Configuration conf, InetSocketAddress nnAddr, Class<T> xface,
      UserGroupInformation ugi, boolean withRetries) throws IOException {
    Text dtService = SecurityUtil.buildTokenService(nnAddr);

    T proxy;
    if (xface == ClientProtocol.class) {
      proxy = (T) createNNProxyWithClientProtocol(nnAddr, conf, ugi,
          withRetries);
    } else if (xface == JournalProtocol.class) {
      proxy = (T) createNNProxyWithJournalProtocol(nnAddr, conf, ugi);
    } else if (xface == NamenodeProtocol.class) {
      proxy = (T) createNNProxyWithNamenodeProtocol(nnAddr, conf, ugi,
          withRetries);
    } else if (xface == GetUserMappingsProtocol.class) {
      proxy = (T) createNNProxyWithGetUserMappingsProtocol(nnAddr, conf, ugi);
    } else if (xface == RefreshUserMappingsProtocol.class) {
      proxy = (T) createNNProxyWithRefreshUserMappingsProtocol(nnAddr, conf, ugi);
    } else if (xface == RefreshAuthorizationPolicyProtocol.class) {
      proxy = (T) createNNProxyWithRefreshAuthorizationPolicyProtocol(nnAddr,
          conf, ugi);
    } else if (xface == RefreshCallQueueProtocol.class) {
      proxy = (T) createNNProxyWithRefreshCallQueueProtocol(nnAddr, conf, ugi);
    } else {
      String message = "Unsupported protocol found when creating the proxy " +
          "connection to NameNode: " +
          ((xface != null) ? xface.getName() : "null");
      LOG.error(message);
      throw new IllegalStateException(message);
    }

    return new ProxyAndInfo<T>(proxy, dtService, nnAddr);
  }
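
  // A minimal sketch, assuming a hypothetical NameNode RPC address
  // "nn01.example.com:8020": admin-style callers that target one specific
  // NameNode rather than a logical nameservice can build a proxy directly.
  //
  //   InetSocketAddress nnAddr =
  //       NetUtils.createSocketAddr("nn01.example.com:8020");
  //   ProxyAndInfo<RefreshCallQueueProtocol> refresh =
  //       NameNodeProxies.createNonHAProxy(conf, nnAddr,
  //           RefreshCallQueueProtocol.class,
  //           UserGroupInformation.getCurrentUser(), false);
  //   refresh.getProxy().refreshCallQueue();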

  private static JournalProtocol createNNProxyWithJournalProtocol(
      InetSocketAddress address, Configuration conf, UserGroupInformation ugi)
      throws IOException {
    JournalProtocolPB proxy = (JournalProtocolPB) createNameNodeProxy(address,
        conf, ugi, JournalProtocolPB.class);
    return new JournalProtocolTranslatorPB(proxy);
  }

  private static RefreshAuthorizationPolicyProtocol
      createNNProxyWithRefreshAuthorizationPolicyProtocol(InetSocketAddress address,
          Configuration conf, UserGroupInformation ugi) throws IOException {
    RefreshAuthorizationPolicyProtocolPB proxy = (RefreshAuthorizationPolicyProtocolPB)
        createNameNodeProxy(address, conf, ugi, RefreshAuthorizationPolicyProtocolPB.class);
    return new RefreshAuthorizationPolicyProtocolClientSideTranslatorPB(proxy);
  }

  private static RefreshUserMappingsProtocol
      createNNProxyWithRefreshUserMappingsProtocol(InetSocketAddress address,
          Configuration conf, UserGroupInformation ugi) throws IOException {
    RefreshUserMappingsProtocolPB proxy = (RefreshUserMappingsProtocolPB)
        createNameNodeProxy(address, conf, ugi, RefreshUserMappingsProtocolPB.class);
    return new RefreshUserMappingsProtocolClientSideTranslatorPB(proxy);
  }

  private static RefreshCallQueueProtocol
      createNNProxyWithRefreshCallQueueProtocol(InetSocketAddress address,
          Configuration conf, UserGroupInformation ugi) throws IOException {
    RefreshCallQueueProtocolPB proxy = (RefreshCallQueueProtocolPB)
        createNameNodeProxy(address, conf, ugi, RefreshCallQueueProtocolPB.class);
    return new RefreshCallQueueProtocolClientSideTranslatorPB(proxy);
  }

  private static GetUserMappingsProtocol createNNProxyWithGetUserMappingsProtocol(
      InetSocketAddress address, Configuration conf, UserGroupInformation ugi)
      throws IOException {
    GetUserMappingsProtocolPB proxy = (GetUserMappingsProtocolPB)
        createNameNodeProxy(address, conf, ugi, GetUserMappingsProtocolPB.class);
    return new GetUserMappingsProtocolClientSideTranslatorPB(proxy);
  }

  private static NamenodeProtocol createNNProxyWithNamenodeProtocol(
      InetSocketAddress address, Configuration conf, UserGroupInformation ugi,
      boolean withRetries) throws IOException {
    NamenodeProtocolPB proxy = (NamenodeProtocolPB) createNameNodeProxy(
        address, conf, ugi, NamenodeProtocolPB.class);
    if (withRetries) { // create the proxy with retries
      RetryPolicy timeoutPolicy = RetryPolicies.exponentialBackoffRetry(5, 200,
          TimeUnit.MILLISECONDS);
      Map<Class<? extends Exception>, RetryPolicy> exceptionToPolicyMap
          = new HashMap<Class<? extends Exception>, RetryPolicy>();
      RetryPolicy methodPolicy = RetryPolicies.retryByException(timeoutPolicy,
          exceptionToPolicyMap);
      Map<String, RetryPolicy> methodNameToPolicyMap
          = new HashMap<String, RetryPolicy>();
      methodNameToPolicyMap.put("getBlocks", methodPolicy);
      methodNameToPolicyMap.put("getAccessKeys", methodPolicy);
      proxy = (NamenodeProtocolPB) RetryProxy.create(NamenodeProtocolPB.class,
          proxy, methodNameToPolicyMap);
    }
    return new NamenodeProtocolTranslatorPB(proxy);
  }

  private static ClientProtocol createNNProxyWithClientProtocol(
      InetSocketAddress address, Configuration conf, UserGroupInformation ugi,
      boolean withRetries) throws IOException {
    RPC.setProtocolEngine(conf, ClientNamenodeProtocolPB.class, ProtobufRpcEngine.class);

    final RetryPolicy defaultPolicy =
        RetryUtils.getDefaultRetryPolicy(
            conf,
            DFSConfigKeys.DFS_CLIENT_RETRY_POLICY_ENABLED_KEY,
            DFSConfigKeys.DFS_CLIENT_RETRY_POLICY_ENABLED_DEFAULT,
            DFSConfigKeys.DFS_CLIENT_RETRY_POLICY_SPEC_KEY,
            DFSConfigKeys.DFS_CLIENT_RETRY_POLICY_SPEC_DEFAULT,
            SafeModeException.class);

    final long version = RPC.getProtocolVersion(ClientNamenodeProtocolPB.class);
    ClientNamenodeProtocolPB proxy = RPC.getProtocolProxy(
        ClientNamenodeProtocolPB.class, version, address, ugi, conf,
        NetUtils.getDefaultSocketFactory(conf),
        org.apache.hadoop.ipc.Client.getTimeout(conf), defaultPolicy)
        .getProxy();

    if (withRetries) { // create the proxy with retries

      RetryPolicy createPolicy = RetryPolicies
          .retryUpToMaximumCountWithFixedSleep(5,
              HdfsConstants.LEASE_SOFTLIMIT_PERIOD, TimeUnit.MILLISECONDS);

      Map<Class<? extends Exception>, RetryPolicy> remoteExceptionToPolicyMap
          = new HashMap<Class<? extends Exception>, RetryPolicy>();
      remoteExceptionToPolicyMap.put(AlreadyBeingCreatedException.class,
          createPolicy);

      Map<Class<? extends Exception>, RetryPolicy> exceptionToPolicyMap
          = new HashMap<Class<? extends Exception>, RetryPolicy>();
      exceptionToPolicyMap.put(RemoteException.class, RetryPolicies
          .retryByRemoteException(defaultPolicy,
              remoteExceptionToPolicyMap));
      RetryPolicy methodPolicy = RetryPolicies.retryByException(
          defaultPolicy, exceptionToPolicyMap);
      Map<String, RetryPolicy> methodNameToPolicyMap
          = new HashMap<String, RetryPolicy>();

      methodNameToPolicyMap.put("create", methodPolicy);

      proxy = (ClientNamenodeProtocolPB) RetryProxy.create(
          ClientNamenodeProtocolPB.class,
          new DefaultFailoverProxyProvider<ClientNamenodeProtocolPB>(
              ClientNamenodeProtocolPB.class, proxy),
          methodNameToPolicyMap,
          defaultPolicy);
    }
    return new ClientNamenodeProtocolTranslatorPB(proxy);
  }

  private static Object createNameNodeProxy(InetSocketAddress address,
      Configuration conf, UserGroupInformation ugi, Class<?> xface)
      throws IOException {
    RPC.setProtocolEngine(conf, xface, ProtobufRpcEngine.class);
    Object proxy = RPC.getProxy(xface, RPC.getProtocolVersion(xface), address,
        ugi, conf, NetUtils.getDefaultSocketFactory(conf));
    return proxy;
  }

  /** Gets the configured Failover proxy provider's class */
  @VisibleForTesting
  public static <T> Class<FailoverProxyProvider<T>> getFailoverProxyProviderClass(
      Configuration conf, URI nameNodeUri) throws IOException {
    if (nameNodeUri == null) {
      return null;
    }
    String host = nameNodeUri.getHost();

    String configKey = DFS_CLIENT_FAILOVER_PROXY_PROVIDER_KEY_PREFIX + "."
        + host;
    try {
      @SuppressWarnings("unchecked")
      Class<FailoverProxyProvider<T>> ret = (Class<FailoverProxyProvider<T>>) conf
          .getClass(configKey, null, FailoverProxyProvider.class);
      return ret;
    } catch (RuntimeException e) {
      if (e.getCause() instanceof ClassNotFoundException) {
        throw new IOException("Could not load failover proxy provider class "
            + conf.get(configKey) + " which is configured for authority "
            + nameNodeUri, e);
      } else {
        throw e;
      }
    }
  }
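
  // For reference, a sketch of the configuration this lookup expects,
  // assuming a hypothetical nameservice "mycluster" (the provider shown is
  // the stock ConfiguredFailoverProxyProvider that ships with HDFS HA):
  //
  //   <property>
  //     <name>dfs.client.failover.proxy.provider.mycluster</name>
  //     <value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
  //   </property>
  //
  // If no such key is set for the URI's host, this method returns null and
  // callers fall back to creating a non-HA proxy.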

  /** Creates the Failover proxy provider instance. */
  @VisibleForTesting
  public static <T> AbstractNNFailoverProxyProvider<T> createFailoverProxyProvider(
      Configuration conf, URI nameNodeUri, Class<T> xface, boolean checkPort)
      throws IOException {
    Class<FailoverProxyProvider<T>> failoverProxyProviderClass = null;
    AbstractNNFailoverProxyProvider<T> providerNN;
    Preconditions.checkArgument(
        xface.isAssignableFrom(NamenodeProtocols.class),
        "Interface %s is not a NameNode protocol", xface);
    try {
      // Obtain the class of the proxy provider
      failoverProxyProviderClass = getFailoverProxyProviderClass(conf,
          nameNodeUri);
      if (failoverProxyProviderClass == null) {
        return null;
      }
      // Create a proxy provider instance.
      Constructor<FailoverProxyProvider<T>> ctor = failoverProxyProviderClass
          .getConstructor(Configuration.class, URI.class, Class.class);
      FailoverProxyProvider<T> provider = ctor.newInstance(conf, nameNodeUri,
          xface);

      // If the proxy provider is of an old implementation, wrap it.
      if (!(provider instanceof AbstractNNFailoverProxyProvider)) {
        providerNN = new WrappedFailoverProxyProvider<T>(provider);
      } else {
        providerNN = (AbstractNNFailoverProxyProvider<T>) provider;
      }
    } catch (Exception e) {
      String message = "Couldn't create proxy provider " + failoverProxyProviderClass;
      if (LOG.isDebugEnabled()) {
        LOG.debug(message, e);
      }
      if (e.getCause() instanceof IOException) {
        throw (IOException) e.getCause();
      } else {
        throw new IOException(message, e);
      }
    }

    // Check the port in the URI, if it is logical.
    if (checkPort && providerNN.useLogicalURI()) {
      int port = nameNodeUri.getPort();
      if (port > 0 && port != NameNode.DEFAULT_PORT) {
        // Throwing here without any cleanup is fine since we have not
        // actually created the underlying proxies yet.
        throw new IOException("Port " + port + " specified in URI "
            + nameNodeUri + " but host '" + nameNodeUri.getHost()
            + "' is a logical (HA) namenode"
            + " and does not use port information.");
      }
    }
    return providerNN;
  }
}