/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hdfs.web;

import java.io.BufferedOutputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.InetSocketAddress;
import java.net.MalformedURLException;
import java.net.URI;
import java.net.URISyntaxException;
import java.net.URL;
import java.security.PrivilegedExceptionAction;
import java.util.List;
import java.util.Map;
import java.util.StringTokenizer;

import javax.ws.rs.core.MediaType;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.DelegationTokenRenewer;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.MD5MD5CRC32FileChecksum;
import org.apache.hadoop.fs.Options;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.HAUtil;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
import org.apache.hadoop.hdfs.web.resources.AccessTimeParam;
import org.apache.hadoop.hdfs.web.resources.BlockSizeParam;
import org.apache.hadoop.hdfs.web.resources.BufferSizeParam;
import org.apache.hadoop.hdfs.web.resources.ConcatSourcesParam;
import org.apache.hadoop.hdfs.web.resources.CreateParentParam;
import org.apache.hadoop.hdfs.web.resources.DelegationParam;
import org.apache.hadoop.hdfs.web.resources.DeleteOpParam;
import org.apache.hadoop.hdfs.web.resources.DestinationParam;
import org.apache.hadoop.hdfs.web.resources.DoAsParam;
import org.apache.hadoop.hdfs.web.resources.GetOpParam;
import org.apache.hadoop.hdfs.web.resources.GroupParam;
import org.apache.hadoop.hdfs.web.resources.HttpOpParam;
import org.apache.hadoop.hdfs.web.resources.LengthParam;
import org.apache.hadoop.hdfs.web.resources.ModificationTimeParam;
import org.apache.hadoop.hdfs.web.resources.OffsetParam;
import org.apache.hadoop.hdfs.web.resources.OverwriteParam;
import org.apache.hadoop.hdfs.web.resources.OwnerParam;
import org.apache.hadoop.hdfs.web.resources.Param;
import org.apache.hadoop.hdfs.web.resources.PermissionParam;
import org.apache.hadoop.hdfs.web.resources.PostOpParam;
import org.apache.hadoop.hdfs.web.resources.PutOpParam;
import org.apache.hadoop.hdfs.web.resources.RecursiveParam;
import org.apache.hadoop.hdfs.web.resources.RenameOptionSetParam;
import org.apache.hadoop.hdfs.web.resources.RenewerParam;
import org.apache.hadoop.hdfs.web.resources.ReplicationParam;
import org.apache.hadoop.hdfs.web.resources.TokenArgumentParam;
import org.apache.hadoop.hdfs.web.resources.UserParam;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.retry.RetryPolicies;
import org.apache.hadoop.io.retry.RetryPolicy;
import org.apache.hadoop.io.retry.RetryUtils;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authentication.client.AuthenticationException;
import org.apache.hadoop.security.token.SecretManager.InvalidToken;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;
import org.apache.hadoop.util.Progressable;
import org.mortbay.util.ajax.JSON;

import com.google.common.base.Charsets;
import com.google.common.collect.Lists;

/** A FileSystem for HDFS over the web. */
public class WebHdfsFileSystem extends FileSystem
    implements DelegationTokenRenewer.Renewable, TokenAspect.TokenManagementDelegator {
  public static final Log LOG = LogFactory.getLog(WebHdfsFileSystem.class);
  /** File System URI: {SCHEME}://namenode:port/path/to/file */
  public static final String SCHEME = "webhdfs";
  /** WebHdfs version. */
  public static final int VERSION = 1;
  /** Http URI: http://namenode:port/{PATH_PREFIX}/path/to/file */
  public static final String PATH_PREFIX = "/" + SCHEME + "/v" + VERSION;

  /** Default connection factory; may be overridden in tests to use smaller timeout values. */
  protected URLConnectionFactory connectionFactory;

  /** Delegation token kind */
  public static final Text TOKEN_KIND = new Text("WEBHDFS delegation");
  protected TokenAspect<WebHdfsFileSystem> tokenAspect;

  private UserGroupInformation ugi;
  private URI uri;
  private Token<?> delegationToken;
  private RetryPolicy retryPolicy = null;
  private Path workingDir;
  private InetSocketAddress nnAddrs[];
  private int currentNNAddrIndex;

  /**
   * Return the protocol scheme for the FileSystem.
   * <p/>
   *
   * @return <code>webhdfs</code>
   */
  @Override
  public String getScheme() {
    return SCHEME;
  }

  /**
   * Return the underlying transport protocol (http / https).
   */
  protected String getTransportScheme() {
    return "http";
  }

  /**
   * Initialize tokenAspect. This function is intended to
   * be overridden by SWebHdfsFileSystem.
   */
  protected synchronized void initializeTokenAspect() {
    tokenAspect = new TokenAspect<WebHdfsFileSystem>(this, TOKEN_KIND);
  }
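
  // A minimal client-side usage sketch (not part of this class): a WebHdfsFileSystem is
  // normally obtained through the FileSystem factory with a "webhdfs" URI, which routes
  // through initialize() below. The host name, port, and path here are illustrative only.
  //
  //   Configuration conf = new Configuration();
  //   FileSystem fs = FileSystem.get(
  //       URI.create("webhdfs://namenode.example.com:50070/"), conf);
  //   FileStatus status = fs.getFileStatus(new Path("/user/alice"));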

  @Override
  public synchronized void initialize(URI uri, Configuration conf
      ) throws IOException {
    super.initialize(uri, conf);
    setConf(conf);
    // set user pattern based on configuration file
    UserParam.setUserPattern(conf.get(
        DFSConfigKeys.DFS_WEBHDFS_USER_PATTERN_KEY,
        DFSConfigKeys.DFS_WEBHDFS_USER_PATTERN_DEFAULT));
    connectionFactory = URLConnectionFactory
        .newDefaultURLConnectionFactory(conf);
    initializeTokenAspect();

    ugi = UserGroupInformation.getCurrentUser();

    try {
      this.uri = new URI(uri.getScheme(), uri.getAuthority(), null,
          null, null);
      this.nnAddrs = DFSUtil.resolveWebHdfsUri(this.uri, conf);
    } catch (URISyntaxException e) {
      throw new IllegalArgumentException(e);
    }

    if (!HAUtil.isLogicalUri(conf, this.uri)) {
      this.retryPolicy =
          RetryUtils.getDefaultRetryPolicy(
              conf,
              DFSConfigKeys.DFS_HTTP_CLIENT_RETRY_POLICY_ENABLED_KEY,
              DFSConfigKeys.DFS_HTTP_CLIENT_RETRY_POLICY_ENABLED_DEFAULT,
              DFSConfigKeys.DFS_HTTP_CLIENT_RETRY_POLICY_SPEC_KEY,
              DFSConfigKeys.DFS_HTTP_CLIENT_RETRY_POLICY_SPEC_DEFAULT,
              SafeModeException.class);
    } else {

      int maxFailoverAttempts = conf.getInt(
          DFSConfigKeys.DFS_HTTP_CLIENT_FAILOVER_MAX_ATTEMPTS_KEY,
          DFSConfigKeys.DFS_HTTP_CLIENT_FAILOVER_MAX_ATTEMPTS_DEFAULT);
      int maxRetryAttempts = conf.getInt(
          DFSConfigKeys.DFS_HTTP_CLIENT_RETRY_MAX_ATTEMPTS_KEY,
          DFSConfigKeys.DFS_HTTP_CLIENT_RETRY_MAX_ATTEMPTS_DEFAULT);
      int failoverSleepBaseMillis = conf.getInt(
          DFSConfigKeys.DFS_HTTP_CLIENT_FAILOVER_SLEEPTIME_BASE_KEY,
          DFSConfigKeys.DFS_HTTP_CLIENT_FAILOVER_SLEEPTIME_BASE_DEFAULT);
      int failoverSleepMaxMillis = conf.getInt(
          DFSConfigKeys.DFS_HTTP_CLIENT_FAILOVER_SLEEPTIME_MAX_KEY,
          DFSConfigKeys.DFS_HTTP_CLIENT_FAILOVER_SLEEPTIME_MAX_DEFAULT);

      this.retryPolicy = RetryPolicies
          .failoverOnNetworkException(RetryPolicies.TRY_ONCE_THEN_FAIL,
              maxFailoverAttempts, maxRetryAttempts, failoverSleepBaseMillis,
              failoverSleepMaxMillis);
    }

    this.workingDir = getHomeDirectory();

    if (UserGroupInformation.isSecurityEnabled()) {
      tokenAspect.initDelegationToken(ugi);
    }
  }

  @Override
  public URI getCanonicalUri() {
    return super.getCanonicalUri();
  }

  /** Is WebHDFS enabled in conf? */
  public static boolean isEnabled(final Configuration conf, final Log log) {
    final boolean b = conf.getBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY,
        DFSConfigKeys.DFS_WEBHDFS_ENABLED_DEFAULT);
    return b;
  }

  protected synchronized Token<?> getDelegationToken() throws IOException {
    tokenAspect.ensureTokenInitialized();
    return delegationToken;
  }

  @Override
  protected int getDefaultPort() {
    return getConf().getInt(DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_KEY,
        DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_DEFAULT);
  }

  @Override
  public URI getUri() {
    return this.uri;
  }

  @Override
  protected URI canonicalizeUri(URI uri) {
    return NetUtils.getCanonicalUri(uri, getDefaultPort());
  }

  /** @return the home directory. */
  public static String getHomeDirectoryString(final UserGroupInformation ugi) {
    return "/user/" + ugi.getShortUserName();
  }

  @Override
  public Path getHomeDirectory() {
    return makeQualified(new Path(getHomeDirectoryString(ugi)));
  }

  @Override
  public synchronized Path getWorkingDirectory() {
    return workingDir;
  }

  @Override
  public synchronized void setWorkingDirectory(final Path dir) {
    String result = makeAbsolute(dir).toUri().getPath();
    if (!DFSUtil.isValidName(result)) {
      throw new IllegalArgumentException("Invalid DFS directory name " +
          result);
    }
    workingDir = makeAbsolute(dir);
  }

  private Path makeAbsolute(Path f) {
    return f.isAbsolute()? f: new Path(workingDir, f);
  }

  static Map<?, ?> jsonParse(final HttpURLConnection c, final boolean useErrorStream
      ) throws IOException {
    if (c.getContentLength() == 0) {
      return null;
    }
    final InputStream in = useErrorStream? c.getErrorStream(): c.getInputStream();
    if (in == null) {
      throw new IOException("The " + (useErrorStream? "error": "input") + " stream is null.");
    }
    final String contentType = c.getContentType();
    if (contentType != null) {
      final MediaType parsed = MediaType.valueOf(contentType);
      if (!MediaType.APPLICATION_JSON_TYPE.isCompatible(parsed)) {
        throw new IOException("Content-Type \"" + contentType
            + "\" is incompatible with \"" + MediaType.APPLICATION_JSON
            + "\" (parsed=\"" + parsed + "\")");
      }
    }
    return (Map<?, ?>)JSON.parse(new InputStreamReader(in, Charsets.UTF_8));
  }

  private static Map<?, ?> validateResponse(final HttpOpParam.Op op,
      final HttpURLConnection conn, boolean unwrapException) throws IOException {
    final int code = conn.getResponseCode();
    if (code != op.getExpectedHttpResponseCode()) {
      final Map<?, ?> m;
      try {
        m = jsonParse(conn, true);
      } catch(Exception e) {
        throw new IOException("Unexpected HTTP response: code=" + code + " != "
            + op.getExpectedHttpResponseCode() + ", " + op.toQueryString()
            + ", message=" + conn.getResponseMessage(), e);
      }

      if (m == null) {
        throw new IOException("Unexpected HTTP response: code=" + code + " != "
            + op.getExpectedHttpResponseCode() + ", " + op.toQueryString()
            + ", message=" + conn.getResponseMessage());
      } else if (m.get(RemoteException.class.getSimpleName()) == null) {
        return m;
      }

      final RemoteException re = JsonUtil.toRemoteException(m);
      throw unwrapException? toIOException(re): re;
    }
    return null;
  }
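
  // For reference, the error body that validateResponse() hands to
  // JsonUtil.toRemoteException() generally has the WebHDFS RemoteException shape shown
  // below; the field values here are illustrative only.
  //
  //   {
  //     "RemoteException": {
  //       "exception"    : "FileNotFoundException",
  //       "javaClassName": "java.io.FileNotFoundException",
  //       "message"      : "File does not exist: /foo"
  //     }
  //   }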

  /**
   * Convert an exception to an IOException.
   *
   * For a non-IOException, wrap it with IOException.
   * For a RemoteException, unwrap it.
   * For an IOException which is not a RemoteException, return it.
   */
  private static IOException toIOException(Exception e) {
    if (!(e instanceof IOException)) {
      return new IOException(e);
    }

    final IOException ioe = (IOException)e;
    if (!(ioe instanceof RemoteException)) {
      return ioe;
    }

    return ((RemoteException)ioe).unwrapRemoteException();
  }

  private synchronized InetSocketAddress getCurrentNNAddr() {
    return nnAddrs[currentNNAddrIndex];
  }

  /**
   * Reset the appropriate state to gracefully fail over to another namenode.
   */
  private synchronized void resetStateToFailOver() {
    currentNNAddrIndex = (currentNNAddrIndex + 1) % nnAddrs.length;
    delegationToken = null;
    tokenAspect.reset();
  }

  /**
   * Return a URL pointing to the given path on the namenode.
   *
   * @param path to obtain the URL for
   * @param query string to append to the path
   * @return namenode URL referring to the given path
   * @throws IOException on error constructing the URL
   */
  private URL getNamenodeURL(String path, String query) throws IOException {
    InetSocketAddress nnAddr = getCurrentNNAddr();
    final URL url = new URL(getTransportScheme(), nnAddr.getHostName(),
        nnAddr.getPort(), path + '?' + query);
    if (LOG.isTraceEnabled()) {
      LOG.trace("url=" + url);
    }
    return url;
  }

  Param<?,?>[] getAuthParameters(final HttpOpParam.Op op) throws IOException {
    List<Param<?,?>> authParams = Lists.newArrayList();
    // Skip adding the delegation token for token operations because these
    // operations require authentication.
    Token<?> token = null;
    if (UserGroupInformation.isSecurityEnabled() && !op.getRequireAuth()) {
      token = getDelegationToken();
    }
    if (token != null) {
      authParams.add(new DelegationParam(token.encodeToUrlString()));
    } else {
      UserGroupInformation userUgi = ugi;
      UserGroupInformation realUgi = userUgi.getRealUser();
      if (realUgi != null) { // proxy user
        authParams.add(new DoAsParam(userUgi.getShortUserName()));
        userUgi = realUgi;
      }
      authParams.add(new UserParam(userUgi.getShortUserName()));
    }
    return authParams.toArray(new Param<?,?>[0]);
  }

  URL toUrl(final HttpOpParam.Op op, final Path fspath,
      final Param<?,?>... parameters) throws IOException {
    //initialize URI path and query
    final String path = PATH_PREFIX
        + (fspath == null? "/": makeQualified(fspath).toUri().getRawPath());
    final String query = op.toQueryString()
        + Param.toSortedString("&", getAuthParameters(op))
        + Param.toSortedString("&", parameters);
    final URL url = getNamenodeURL(path, query);
    if (LOG.isTraceEnabled()) {
      LOG.trace("url=" + url);
    }
    return url;
  }

  /**
   * Run an HTTP operation.
   * Connect to the HTTP server, validate the response, and obtain the JSON output.
   *
   * @param op http operation
   * @param fspath file system path
   * @param parameters parameters for the operation
   * @return a JSON object, e.g. Object[], Map<?, ?>, etc.
   * @throws IOException
   */
  private Map<?, ?> run(final HttpOpParam.Op op, final Path fspath,
      final Param<?,?>... parameters) throws IOException {
    return new FsPathRunner(op, fspath, parameters).run().json;
  }
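
  // For reference, a URL composed by toUrl() above looks roughly like the following
  // (the host, port, path, and user name are illustrative; parameter order depends on
  // Param.toSortedString):
  //
  //   http://namenode.example.com:50070/webhdfs/v1/user/alice/data.txt?op=OPEN&user.name=alice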

  /**
   * This class is for initializing an HTTP connection, connecting to the server,
   * obtaining a response, and also handling retry on failures.
   */
  abstract class AbstractRunner {
    abstract protected URL getUrl() throws IOException;

    protected final HttpOpParam.Op op;
    private final boolean redirected;

    private boolean checkRetry;
    protected HttpURLConnection conn = null;
    private Map<?, ?> json = null;

    protected AbstractRunner(final HttpOpParam.Op op, boolean redirected) {
      this.op = op;
      this.redirected = redirected;
    }

    private HttpURLConnection getHttpUrlConnection(final URL url)
        throws IOException, AuthenticationException {
      UserGroupInformation connectUgi = ugi.getRealUser();
      if (connectUgi == null) {
        connectUgi = ugi;
      }
      try {
        return connectUgi.doAs(
            new PrivilegedExceptionAction<HttpURLConnection>() {
              @Override
              public HttpURLConnection run() throws IOException {
                return openHttpUrlConnection(url);
              }
            });
      } catch (IOException ioe) {
        Throwable cause = ioe.getCause();
        if (cause != null && cause instanceof AuthenticationException) {
          throw (AuthenticationException)cause;
        }
        throw ioe;
      } catch (InterruptedException e) {
        throw new IOException(e);
      }
    }

    private HttpURLConnection openHttpUrlConnection(final URL url)
        throws IOException {
      final HttpURLConnection conn;
      try {
        conn = (HttpURLConnection) connectionFactory.openConnection(url,
            op.getRequireAuth());
      } catch (AuthenticationException e) {
        throw new IOException(e);
      }
      return conn;
    }

    private void init() throws IOException {
      checkRetry = !redirected;
      URL url = getUrl();
      try {
        conn = getHttpUrlConnection(url);
      } catch(AuthenticationException ae) {
        checkRetry = false;
        throw new IOException("Authentication failed, url=" + url, ae);
      }
    }

    private void connect() throws IOException {
      connect(op.getDoOutput());
    }

    private void connect(boolean doOutput) throws IOException {
      conn.setRequestMethod(op.getType().toString());
      conn.setDoOutput(doOutput);
      conn.setInstanceFollowRedirects(false);
      conn.connect();
    }

    private void disconnect() {
      if (conn != null) {
        conn.disconnect();
        conn = null;
      }
    }

    AbstractRunner run() throws IOException {
      /**
       * Do the real work.
       *
       * There are three cases in which the code inside the loop can throw an
       * IOException:
       *
       * <ul>
       * <li>The connection has failed (e.g., ConnectException,
       * @see FailoverOnNetworkExceptionRetry for more details)</li>
       * <li>The namenode enters the standby state (i.e., StandbyException).</li>
       * <li>The server returns errors for the command (i.e., RemoteException)</li>
       * </ul>
       *
       * The call to shouldRetry() applies the retry policy. The policy
       * examines the exception and swallows it if it decides to rerun the work.
       */
      for(int retry = 0; ; retry++) {
        try {
          init();
          if (op.getDoOutput()) {
            twoStepWrite();
          } else {
            getResponse(op != GetOpParam.Op.OPEN);
          }
          return this;
        } catch(IOException ioe) {
          shouldRetry(ioe, retry);
        }
      }
    }

    private void shouldRetry(final IOException ioe, final int retry
        ) throws IOException {
      InetSocketAddress nnAddr = getCurrentNNAddr();
      if (checkRetry) {
        try {
          final RetryPolicy.RetryAction a = retryPolicy.shouldRetry(
              ioe, retry, 0, true);

          boolean isRetry = a.action == RetryPolicy.RetryAction.RetryDecision.RETRY;
          boolean isFailoverAndRetry =
              a.action == RetryPolicy.RetryAction.RetryDecision.FAILOVER_AND_RETRY;

          if (isRetry || isFailoverAndRetry) {
            LOG.info("Retrying connect to namenode: " + nnAddr
                + ". Already tried " + retry + " time(s); retry policy is "
                + retryPolicy + ", delay " + a.delayMillis + "ms.");

            if (isFailoverAndRetry) {
              resetStateToFailOver();
            }

            Thread.sleep(a.delayMillis);
            return;
          }
        } catch(Exception e) {
          LOG.warn("Original exception is ", ioe);
          throw toIOException(e);
        }
      }
      throw toIOException(ioe);
    }
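
    // The next method implements the two-step create/append described in its javadoc.
    // At the wire level the exchange looks roughly like this (illustrative; hosts,
    // ports, and status codes follow the WebHDFS REST API convention):
    //
    //   1) PUT http://<namenode>:<port>/webhdfs/v1/<path>?op=CREATE...   (no body)
    //        <- 307 Temporary Redirect, Location: http://<datanode>:<port>/...
    //   2) PUT <Location URL>                                            (file data)
    //        <- 201 Created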

    /**
     * Two-step create/append:
     * Step 1) Submit an HTTP request with neither auto-redirect nor data.
     * Step 2) Submit another HTTP request with the URL from the Location header, this time with data.
     *
     * The reason for the two-step create/append is to prevent clients from
     * sending out data before the redirect. This issue is addressed by the
     * "Expect: 100-continue" header in HTTP/1.1; see RFC 2616, Section 8.2.3.
     * Unfortunately, there are software library bugs (e.g. the Jetty 6 HTTP server
     * and the Java 6 HTTP client) that do not correctly implement "Expect:
     * 100-continue". The two-step create/append is a temporary workaround for
     * these software library bugs.
     */
    HttpURLConnection twoStepWrite() throws IOException {
      //Step 1) Submit an HTTP request with neither auto-redirect nor data.
      connect(false);
      validateResponse(HttpOpParam.TemporaryRedirectOp.valueOf(op), conn, false);
      final String redirect = conn.getHeaderField("Location");
      disconnect();
      checkRetry = false;

      //Step 2) Submit another HTTP request with the URL from the Location header, this time with data.
      conn = (HttpURLConnection) connectionFactory.openConnection(new URL(
          redirect));
      conn.setRequestProperty("Content-Type",
          MediaType.APPLICATION_OCTET_STREAM);
      conn.setChunkedStreamingMode(32 << 10); // 32 KB chunks
      connect();
      return conn;
    }

    FSDataOutputStream write(final int bufferSize) throws IOException {
      return WebHdfsFileSystem.this.write(op, conn, bufferSize);
    }

    void getResponse(boolean getJsonAndDisconnect) throws IOException {
      try {
        connect();
        final int code = conn.getResponseCode();
        if (!redirected && op.getRedirect()
            && code != op.getExpectedHttpResponseCode()) {
          final String redirect = conn.getHeaderField("Location");
          json = validateResponse(HttpOpParam.TemporaryRedirectOp.valueOf(op),
              conn, false);
          disconnect();

          checkRetry = false;
          conn = (HttpURLConnection) connectionFactory.openConnection(new URL(
              redirect));
          connect();
        }

        json = validateResponse(op, conn, false);
        if (json == null && getJsonAndDisconnect) {
          json = jsonParse(conn, false);
        }
      } finally {
        if (getJsonAndDisconnect) {
          disconnect();
        }
      }
    }
  }

  final class FsPathRunner extends AbstractRunner {
    private final Path fspath;
    private final Param<?, ?>[] parameters;

    FsPathRunner(final HttpOpParam.Op op, final Path fspath, final Param<?,?>... parameters) {
      super(op, false);
      this.fspath = fspath;
      this.parameters = parameters;
    }

    @Override
    protected URL getUrl() throws IOException {
      return toUrl(op, fspath, parameters);
    }
  }

  final class URLRunner extends AbstractRunner {
    private final URL url;
    @Override
    protected URL getUrl() {
      return url;
    }

    protected URLRunner(final HttpOpParam.Op op, final URL url, boolean redirected) {
      super(op, redirected);
      this.url = url;
    }
  }

  private FsPermission applyUMask(FsPermission permission) {
    if (permission == null) {
      permission = FsPermission.getDefault();
    }
    return permission.applyUMask(FsPermission.getUMask(getConf()));
  }

  private HdfsFileStatus getHdfsFileStatus(Path f) throws IOException {
    final HttpOpParam.Op op = GetOpParam.Op.GETFILESTATUS;
    final Map<?, ?> json = run(op, f);
    final HdfsFileStatus status = JsonUtil.toFileStatus(json, true);
    if (status == null) {
      throw new FileNotFoundException("File does not exist: " + f);
    }
    return status;
  }

  @Override
  public FileStatus getFileStatus(Path f) throws IOException {
    statistics.incrementReadOps(1);
    return makeQualified(getHdfsFileStatus(f), f);
  }

  private FileStatus makeQualified(HdfsFileStatus f, Path parent) {
    return new FileStatus(f.getLen(), f.isDir(), f.getReplication(),
        f.getBlockSize(), f.getModificationTime(), f.getAccessTime(),
        f.getPermission(), f.getOwner(), f.getGroup(),
        f.isSymlink() ? new Path(f.getSymlink()) : null,
        f.getFullPath(parent).makeQualified(getUri(), getWorkingDirectory()));
  }

  @Override
  public boolean mkdirs(Path f, FsPermission permission) throws IOException {
    statistics.incrementWriteOps(1);
    final HttpOpParam.Op op = PutOpParam.Op.MKDIRS;
    final Map<?, ?> json = run(op, f,
        new PermissionParam(applyUMask(permission)));
    return (Boolean)json.get("boolean");
  }
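
  // For reference, boolean operations such as MKDIRS, RENAME, DELETE, and SETREPLICATION
  // return a small JSON body of the form {"boolean": true} (illustrative; see the WebHDFS
  // REST API documentation), which is why these methods read the "boolean" key from the
  // parsed map.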

  /**
   * Create a symlink pointing to the destination path.
   * @see org.apache.hadoop.fs.Hdfs#createSymlink(Path, Path, boolean)
   */
  public void createSymlink(Path destination, Path f, boolean createParent
      ) throws IOException {
    statistics.incrementWriteOps(1);
    final HttpOpParam.Op op = PutOpParam.Op.CREATESYMLINK;
    run(op, f, new DestinationParam(makeQualified(destination).toUri().getPath()),
        new CreateParentParam(createParent));
  }

  @Override
  public boolean rename(final Path src, final Path dst) throws IOException {
    statistics.incrementWriteOps(1);
    final HttpOpParam.Op op = PutOpParam.Op.RENAME;
    final Map<?, ?> json = run(op, src,
        new DestinationParam(makeQualified(dst).toUri().getPath()));
    return (Boolean)json.get("boolean");
  }

  @SuppressWarnings("deprecation")
  @Override
  public void rename(final Path src, final Path dst,
      final Options.Rename... options) throws IOException {
    statistics.incrementWriteOps(1);
    final HttpOpParam.Op op = PutOpParam.Op.RENAME;
    run(op, src, new DestinationParam(makeQualified(dst).toUri().getPath()),
        new RenameOptionSetParam(options));
  }

  @Override
  public void setOwner(final Path p, final String owner, final String group
      ) throws IOException {
    if (owner == null && group == null) {
      throw new IOException("owner == null && group == null");
    }

    statistics.incrementWriteOps(1);
    final HttpOpParam.Op op = PutOpParam.Op.SETOWNER;
    run(op, p, new OwnerParam(owner), new GroupParam(group));
  }

  @Override
  public void setPermission(final Path p, final FsPermission permission
      ) throws IOException {
    statistics.incrementWriteOps(1);
    final HttpOpParam.Op op = PutOpParam.Op.SETPERMISSION;
    run(op, p, new PermissionParam(permission));
  }

  @Override
  public boolean setReplication(final Path p, final short replication
      ) throws IOException {
    statistics.incrementWriteOps(1);
    final HttpOpParam.Op op = PutOpParam.Op.SETREPLICATION;
    final Map<?, ?> json = run(op, p, new ReplicationParam(replication));
    return (Boolean)json.get("boolean");
  }

  @Override
  public void setTimes(final Path p, final long mtime, final long atime
      ) throws IOException {
    statistics.incrementWriteOps(1);
    final HttpOpParam.Op op = PutOpParam.Op.SETTIMES;
    run(op, p, new ModificationTimeParam(mtime), new AccessTimeParam(atime));
  }

  @Override
  public long getDefaultBlockSize() {
    return getConf().getLongBytes(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,
        DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT);
  }

  @Override
  public short getDefaultReplication() {
    return (short)getConf().getInt(DFSConfigKeys.DFS_REPLICATION_KEY,
        DFSConfigKeys.DFS_REPLICATION_DEFAULT);
  }

  FSDataOutputStream write(final HttpOpParam.Op op,
      final HttpURLConnection conn, final int bufferSize) throws IOException {
    return new FSDataOutputStream(new BufferedOutputStream(
        conn.getOutputStream(), bufferSize), statistics) {
      @Override
      public void close() throws IOException {
        try {
          super.close();
        } finally {
          try {
            validateResponse(op, conn, true);
          } finally {
            conn.disconnect();
          }
        }
      }
    };
  }

  @Override
  public void concat(final Path trg, final Path[] srcs) throws IOException {
    statistics.incrementWriteOps(1);
    final HttpOpParam.Op op = PostOpParam.Op.CONCAT;

    ConcatSourcesParam param = new ConcatSourcesParam(srcs);
    run(op, trg, param);
  }
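
  // A minimal write/read sketch from the client side (illustrative; the path is an
  // example, and "fs" is a WebHdfsFileSystem obtained as in the earlier sketch). The
  // create() and open() overrides below back the standard FileSystem streams with
  // WebHDFS HTTP requests.
  //
  //   FSDataOutputStream out = fs.create(new Path("/tmp/example.txt"));
  //   out.write("hello".getBytes(Charsets.UTF_8));
  //   out.close();
  //
  //   FSDataInputStream in = fs.open(new Path("/tmp/example.txt"));
  //   int firstByte = in.read();
  //   in.close();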

  @Override
  public FSDataOutputStream create(final Path f, final FsPermission permission,
      final boolean overwrite, final int bufferSize, final short replication,
      final long blockSize, final Progressable progress) throws IOException {
    statistics.incrementWriteOps(1);

    final HttpOpParam.Op op = PutOpParam.Op.CREATE;
    return new FsPathRunner(op, f,
        new PermissionParam(applyUMask(permission)),
        new OverwriteParam(overwrite),
        new BufferSizeParam(bufferSize),
        new ReplicationParam(replication),
        new BlockSizeParam(blockSize))
      .run()
      .write(bufferSize);
  }

  @Override
  public FSDataOutputStream append(final Path f, final int bufferSize,
      final Progressable progress) throws IOException {
    statistics.incrementWriteOps(1);

    final HttpOpParam.Op op = PostOpParam.Op.APPEND;
    return new FsPathRunner(op, f, new BufferSizeParam(bufferSize))
      .run()
      .write(bufferSize);
  }

  @Override
  public boolean delete(Path f, boolean recursive) throws IOException {
    final HttpOpParam.Op op = DeleteOpParam.Op.DELETE;
    final Map<?, ?> json = run(op, f, new RecursiveParam(recursive));
    return (Boolean)json.get("boolean");
  }

  @Override
  public FSDataInputStream open(final Path f, final int buffersize
      ) throws IOException {
    statistics.incrementReadOps(1);
    final HttpOpParam.Op op = GetOpParam.Op.OPEN;
    final URL url = toUrl(op, f, new BufferSizeParam(buffersize));
    return new FSDataInputStream(new OffsetUrlInputStream(
        new OffsetUrlOpener(url), new OffsetUrlOpener(null)));
  }

  @Override
  public void close() throws IOException {
    super.close();
    synchronized (this) {
      tokenAspect.removeRenewAction();
    }
  }

  class OffsetUrlOpener extends ByteRangeInputStream.URLOpener {
    OffsetUrlOpener(final URL url) {
      super(url);
    }

    /** Set up the offset URL and connect. */
    @Override
    protected HttpURLConnection connect(final long offset,
        final boolean resolved) throws IOException {
      final URL offsetUrl = offset == 0L? url
          : new URL(url + "&" + new OffsetParam(offset));
      return new URLRunner(GetOpParam.Op.OPEN, offsetUrl, resolved).run().conn;
    }
  }

  private static final String OFFSET_PARAM_PREFIX = OffsetParam.NAME + "=";

  /** Remove the offset parameter, if there is any, from the URL. */
  static URL removeOffsetParam(final URL url) throws MalformedURLException {
    String query = url.getQuery();
    if (query == null) {
      return url;
    }
    final String lower = query.toLowerCase();
    if (!lower.startsWith(OFFSET_PARAM_PREFIX)
        && !lower.contains("&" + OFFSET_PARAM_PREFIX)) {
      return url;
    }

    //rebuild query
    StringBuilder b = null;
    for(final StringTokenizer st = new StringTokenizer(query, "&");
        st.hasMoreTokens();) {
      final String token = st.nextToken();
      if (!token.toLowerCase().startsWith(OFFSET_PARAM_PREFIX)) {
        if (b == null) {
          b = new StringBuilder("?").append(token);
        } else {
          b.append('&').append(token);
        }
      }
    }
    query = b == null? "": b.toString();

    final String urlStr = url.toString();
    return new URL(urlStr.substring(0, urlStr.indexOf('?')) + query);
  }
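
  // A worked example of removeOffsetParam (the URL is illustrative, and the offset
  // parameter name is assumed to be "offset", i.e. OffsetParam.NAME):
  //
  //   input : http://nn.example.com:50070/webhdfs/v1/f?op=OPEN&offset=1024&length=100
  //   output: http://nn.example.com:50070/webhdfs/v1/f?op=OPEN&length=100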

  static class OffsetUrlInputStream extends ByteRangeInputStream {
    OffsetUrlInputStream(OffsetUrlOpener o, OffsetUrlOpener r) {
      super(o, r);
    }

    /** Remove the offset parameter before returning the resolved URL. */
    @Override
    protected URL getResolvedUrl(final HttpURLConnection connection
        ) throws MalformedURLException {
      return removeOffsetParam(connection.getURL());
    }
  }

  @Override
  public FileStatus[] listStatus(final Path f) throws IOException {
    statistics.incrementReadOps(1);

    final HttpOpParam.Op op = GetOpParam.Op.LISTSTATUS;
    final Map<?, ?> json = run(op, f);
    final Map<?, ?> rootmap = (Map<?, ?>)json.get(FileStatus.class.getSimpleName() + "es");
    final Object[] array = (Object[])rootmap.get(FileStatus.class.getSimpleName());

    //convert FileStatus
    final FileStatus[] statuses = new FileStatus[array.length];
    for(int i = 0; i < array.length; i++) {
      final Map<?, ?> m = (Map<?, ?>)array[i];
      statuses[i] = makeQualified(JsonUtil.toFileStatus(m, false), f);
    }
    return statuses;
  }

  @Override
  public Token<DelegationTokenIdentifier> getDelegationToken(
      final String renewer) throws IOException {
    final HttpOpParam.Op op = GetOpParam.Op.GETDELEGATIONTOKEN;
    final Map<?, ?> m = run(op, null, new RenewerParam(renewer));
    final Token<DelegationTokenIdentifier> token = JsonUtil.toDelegationToken(m);
    SecurityUtil.setTokenService(token, getCurrentNNAddr());
    return token;
  }

  @Override
  public Token<?> getRenewToken() {
    return delegationToken;
  }

  @Override
  public <T extends TokenIdentifier> void setDelegationToken(
      final Token<T> token) {
    synchronized(this) {
      delegationToken = token;
    }
  }

  @Override
  public synchronized long renewDelegationToken(final Token<?> token
      ) throws IOException {
    final HttpOpParam.Op op = PutOpParam.Op.RENEWDELEGATIONTOKEN;
    TokenArgumentParam dtargParam = new TokenArgumentParam(
        token.encodeToUrlString());
    final Map<?, ?> m = run(op, null, dtargParam);
    return (Long) m.get("long");
  }

  @Override
  public synchronized void cancelDelegationToken(final Token<?> token
      ) throws IOException {
    final HttpOpParam.Op op = PutOpParam.Op.CANCELDELEGATIONTOKEN;
    TokenArgumentParam dtargParam = new TokenArgumentParam(
        token.encodeToUrlString());
    run(op, null, dtargParam);
  }

  @Override
  public BlockLocation[] getFileBlockLocations(final FileStatus status,
      final long offset, final long length) throws IOException {
    if (status == null) {
      return null;
    }
    return getFileBlockLocations(status.getPath(), offset, length);
  }

  @Override
  public BlockLocation[] getFileBlockLocations(final Path p,
      final long offset, final long length) throws IOException {
    statistics.incrementReadOps(1);

    final HttpOpParam.Op op = GetOpParam.Op.GET_BLOCK_LOCATIONS;
    final Map<?, ?> m = run(op, p, new OffsetParam(offset),
        new LengthParam(length));
    return DFSUtil.locatedBlocks2Locations(JsonUtil.toLocatedBlocks(m));
  }

  @Override
  public ContentSummary getContentSummary(final Path p) throws IOException {
    statistics.incrementReadOps(1);

    final HttpOpParam.Op op = GetOpParam.Op.GETCONTENTSUMMARY;
    final Map<?, ?> m = run(op, p);
    return JsonUtil.toContentSummary(m);
  }

  @Override
  public MD5MD5CRC32FileChecksum getFileChecksum(final Path p
      ) throws IOException {
    statistics.incrementReadOps(1);

    final HttpOpParam.Op op = GetOpParam.Op.GETFILECHECKSUM;
    final Map<?, ?> m = run(op, p);
    return JsonUtil.toMD5MD5CRC32FileChecksum(m);
  }
}