/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hdfs.web;

import java.io.BufferedOutputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.InetSocketAddress;
import java.net.MalformedURLException;
import java.net.URI;
import java.net.URISyntaxException;
import java.net.URL;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.StringTokenizer;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileAlreadyExistsException;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.MD5MD5CRC32FileChecksum;
import org.apache.hadoop.fs.Options;
import org.apache.hadoop.fs.ParentNotDirectoryException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.ByteRangeInputStream;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException;
import org.apache.hadoop.hdfs.protocol.UnresolvedPathException;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenRenewer;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSelector;
import org.apache.hadoop.hdfs.server.common.JspHelper;
import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
import org.apache.hadoop.hdfs.web.resources.AccessTimeParam;
import org.apache.hadoop.hdfs.web.resources.BlockSizeParam;
import org.apache.hadoop.hdfs.web.resources.BufferSizeParam;
import org.apache.hadoop.hdfs.web.resources.CreateParentParam;
import org.apache.hadoop.hdfs.web.resources.DeleteOpParam;
import org.apache.hadoop.hdfs.web.resources.DestinationParam;
import org.apache.hadoop.hdfs.web.resources.GetOpParam;
import org.apache.hadoop.hdfs.web.resources.GroupParam;
import org.apache.hadoop.hdfs.web.resources.HttpOpParam;
import org.apache.hadoop.hdfs.web.resources.LengthParam;
import org.apache.hadoop.hdfs.web.resources.ModificationTimeParam;
import org.apache.hadoop.hdfs.web.resources.OffsetParam;
import org.apache.hadoop.hdfs.web.resources.OverwriteParam;
import org.apache.hadoop.hdfs.web.resources.OwnerParam;
import org.apache.hadoop.hdfs.web.resources.Param;
import org.apache.hadoop.hdfs.web.resources.PermissionParam;
import org.apache.hadoop.hdfs.web.resources.PostOpParam;
import org.apache.hadoop.hdfs.web.resources.PutOpParam;
import org.apache.hadoop.hdfs.web.resources.RecursiveParam;
import org.apache.hadoop.hdfs.web.resources.RenameOptionSetParam;
import org.apache.hadoop.hdfs.web.resources.RenewerParam;
import org.apache.hadoop.hdfs.web.resources.ReplicationParam;
import org.apache.hadoop.hdfs.web.resources.TokenArgumentParam;
import org.apache.hadoop.hdfs.web.resources.UserParam;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
import org.apache.hadoop.security.authentication.client.AuthenticationException;
import org.apache.hadoop.security.authorize.AuthorizationException;
import org.apache.hadoop.security.token.SecretManager.InvalidToken;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;
import org.apache.hadoop.security.token.TokenRenewer;
import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSelector;
import org.apache.hadoop.util.Progressable;
import org.mortbay.util.ajax.JSON;

/** A FileSystem for HDFS over the web. */
public class WebHdfsFileSystem extends FileSystem
    implements DelegationTokenRenewer.Renewable {
  public static final Log LOG = LogFactory.getLog(WebHdfsFileSystem.class);
  /** File System URI: {SCHEME}://namenode:port/path/to/file */
  public static final String SCHEME = "webhdfs";
  /** WebHdfs version. */
  public static final int VERSION = 1;
  /** Http URI: http://namenode:port/{PATH_PREFIX}/path/to/file */
  public static final String PATH_PREFIX = "/" + SCHEME + "/v" + VERSION;

  /** SPNEGO authenticator */
  private static final KerberosUgiAuthenticator AUTH = new KerberosUgiAuthenticator();
  /** Delegation token kind */
  public static final Text TOKEN_KIND = new Text("WEBHDFS delegation");
  /** Token selector */
  public static final WebHdfsDelegationTokenSelector DT_SELECTOR
      = new WebHdfsDelegationTokenSelector();

  private static DelegationTokenRenewer<WebHdfsFileSystem> DT_RENEWER = null;

  private static synchronized void addRenewAction(final WebHdfsFileSystem webhdfs) {
    if (DT_RENEWER == null) {
      DT_RENEWER = new DelegationTokenRenewer<WebHdfsFileSystem>(WebHdfsFileSystem.class);
      DT_RENEWER.start();
    }

    DT_RENEWER.addRenewAction(webhdfs);
  }
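  /*
   * Usage sketch (illustrative; not part of the original class). A client
   * normally obtains this FileSystem through the standard FileSystem factory,
   * assuming the "webhdfs" scheme is mapped to this class in the cluster
   * configuration. The host name, port and paths below are placeholders.
   *
   *   Configuration conf = new Configuration();
   *   FileSystem fs = FileSystem.get(
   *       URI.create("webhdfs://namenode:50070/"), conf);
   *   FileStatus[] listing = fs.listStatus(new Path("/user/alice"));
   */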
  /** Is WebHDFS enabled in conf? */
  public static boolean isEnabled(final Configuration conf, final Log log) {
    final boolean b = conf.getBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY,
        DFSConfigKeys.DFS_WEBHDFS_ENABLED_DEFAULT);
    log.info(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY + " = " + b);
    return b;
  }

  private final UserGroupInformation ugi;
  private InetSocketAddress nnAddr;
  private URI uri;
  private Token<?> delegationToken;
  private final AuthenticatedURL.Token authToken = new AuthenticatedURL.Token();
  private Path workingDir;

  {
    try {
      ugi = UserGroupInformation.getCurrentUser();
    } catch (IOException e) {
      throw new RuntimeException(e);
    }
  }

  /**
   * Return the protocol scheme for the FileSystem.
   * <p/>
   *
   * @return <code>webhdfs</code>
   */
  @Override
  public String getScheme() {
    return "webhdfs";
  }

  @Override
  public synchronized void initialize(URI uri, Configuration conf
      ) throws IOException {
    super.initialize(uri, conf);
    setConf(conf);
    try {
      this.uri = new URI(uri.getScheme(), uri.getAuthority(), null, null, null);
    } catch (URISyntaxException e) {
      throw new IllegalArgumentException(e);
    }
    this.nnAddr = NetUtils.createSocketAddr(uri.getAuthority(), getDefaultPort());
    this.workingDir = getHomeDirectory();

    if (UserGroupInformation.isSecurityEnabled()) {
      initDelegationToken();
    }
  }

  protected void initDelegationToken() throws IOException {
    // look for webhdfs token, then try hdfs
    Token<?> token = selectDelegationToken(ugi);

    // if we do not already have a token, get a new one
    boolean createdToken = false;
    if (token == null) {
      token = getDelegationToken(null);
      createdToken = (token != null);
    }

    // security might be disabled
    if (token != null) {
      setDelegationToken(token);
      if (createdToken) {
        addRenewAction(this);
        LOG.debug("Created new DT for " + token.getService());
      } else {
        LOG.debug("Found existing DT for " + token.getService());
      }
    }
  }

  protected Token<DelegationTokenIdentifier> selectDelegationToken(
      UserGroupInformation ugi) {
    return DT_SELECTOR.selectToken(getCanonicalUri(), ugi.getTokens(), getConf());
  }

  @Override
  protected int getDefaultPort() {
    return getConf().getInt(DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_KEY,
        DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_DEFAULT);
  }

  @Override
  public URI getUri() {
    return this.uri;
  }

  /** @return the home directory. */
  public static String getHomeDirectoryString(final UserGroupInformation ugi) {
    return "/user/" + ugi.getShortUserName();
  }

  @Override
  public Path getHomeDirectory() {
    return makeQualified(new Path(getHomeDirectoryString(ugi)));
  }

  @Override
  public synchronized Path getWorkingDirectory() {
    return workingDir;
  }

  @Override
  public synchronized void setWorkingDirectory(final Path dir) {
    String result = makeAbsolute(dir).toUri().getPath();
    if (!DFSUtil.isValidName(result)) {
      throw new IllegalArgumentException("Invalid DFS directory name " +
          result);
    }
    workingDir = makeAbsolute(dir);
  }

  private Path makeAbsolute(Path f) {
    return f.isAbsolute()? f: new Path(workingDir, f);
  }
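  /*
   * Illustrative note (based on the WebHDFS JSON format; values are
   * placeholders): an error response handled by validateResponse below is
   * expected to carry a RemoteException object, roughly of the form
   *
   *   {"RemoteException":{"exception":"FileNotFoundException",
   *                       "javaClassName":"java.io.FileNotFoundException",
   *                       "message":"File does not exist: /foo/bar"}}
   */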
  static Map<?, ?> jsonParse(final InputStream in) throws IOException {
    if (in == null) {
      throw new IOException("The input stream is null.");
    }
    return (Map<?, ?>)JSON.parse(new InputStreamReader(in));
  }

  private static Map<?, ?> validateResponse(final HttpOpParam.Op op,
      final HttpURLConnection conn) throws IOException {
    final int code = conn.getResponseCode();
    if (code != op.getExpectedHttpResponseCode()) {
      final Map<?, ?> m;
      try {
        m = jsonParse(conn.getErrorStream());
      } catch(IOException e) {
        throw new IOException("Unexpected HTTP response: code=" + code + " != "
            + op.getExpectedHttpResponseCode() + ", " + op.toQueryString()
            + ", message=" + conn.getResponseMessage(), e);
      }

      if (m.get(RemoteException.class.getSimpleName()) == null) {
        return m;
      }

      final RemoteException re = JsonUtil.toRemoteException(m);
      throw re.unwrapRemoteException(AccessControlException.class,
          InvalidToken.class,
          AuthenticationException.class,
          AuthorizationException.class,
          FileAlreadyExistsException.class,
          FileNotFoundException.class,
          ParentNotDirectoryException.class,
          UnresolvedPathException.class,
          SafeModeException.class,
          DSQuotaExceededException.class,
          NSQuotaExceededException.class);
    }
    return null;
  }

  /**
   * Return a URL pointing to given path on the namenode.
   *
   * @param path to obtain the URL for
   * @param query string to append to the path
   * @return namenode URL referring to the given path
   * @throws IOException on error constructing the URL
   */
  private URL getNamenodeURL(String path, String query) throws IOException {
    final URL url = new URL("http", nnAddr.getHostName(),
        nnAddr.getPort(), path + '?' + query);
    if (LOG.isTraceEnabled()) {
      LOG.trace("url=" + url);
    }
    return url;
  }

  private String addDt2Query(String query) throws IOException {
    if (UserGroupInformation.isSecurityEnabled()) {
      synchronized (this) {
        if (delegationToken != null) {
          final String encoded = delegationToken.encodeToUrlString();
          return query + JspHelper.getDelegationTokenUrlParam(encoded);
        } // else we are talking to an insecure cluster
      }
    }
    return query;
  }
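  /*
   * Illustrative example (not from the original source; host, port, user and
   * path are placeholders): for path /foo/bar and op GETFILESTATUS, toUrl
   * below builds a URL of the form
   *
   *   http://namenode:50070/webhdfs/v1/foo/bar?op=GETFILESTATUS&user.name=alice
   *
   * with a delegation token parameter appended by addDt2Query when one is
   * available.
   */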
  URL toUrl(final HttpOpParam.Op op, final Path fspath,
      final Param<?,?>... parameters) throws IOException {
    // initialize URI path and query
    final String path = PATH_PREFIX
        + (fspath == null? "/": makeQualified(fspath).toUri().getPath());
    final String query = op.toQueryString()
        + '&' + new UserParam(ugi)
        + Param.toSortedString("&", parameters);
    final URL url;
    if (op == PutOpParam.Op.RENEWDELEGATIONTOKEN
        || op == GetOpParam.Op.GETDELEGATIONTOKEN
        || op == GetOpParam.Op.GETDELEGATIONTOKENS) {
      // Skip adding the delegation token for getting or renewing delegation
      // tokens, because these operations require Kerberos authentication.
      url = getNamenodeURL(path, query);
    } else {
      url = getNamenodeURL(path, addDt2Query(query));
    }
    if (LOG.isTraceEnabled()) {
      LOG.trace("url=" + url);
    }
    return url;
  }

  private HttpURLConnection getHttpUrlConnection(URL url)
      throws IOException {
    final HttpURLConnection conn;
    try {
      if (ugi.hasKerberosCredentials()) {
        conn = new AuthenticatedURL(AUTH).openConnection(url, authToken);
      } else {
        conn = (HttpURLConnection)url.openConnection();
      }
    } catch (AuthenticationException e) {
      throw new IOException("Authentication failed, url=" + url, e);
    }
    return conn;
  }

  private HttpURLConnection httpConnect(final HttpOpParam.Op op, final Path fspath,
      final Param<?,?>... parameters) throws IOException {
    final URL url = toUrl(op, fspath, parameters);

    // connect and get response
    HttpURLConnection conn = getHttpUrlConnection(url);
    try {
      conn.setRequestMethod(op.getType().toString());
      if (op.getDoOutput()) {
        conn = twoStepWrite(conn, op);
        conn.setRequestProperty("Content-Type", "application/octet-stream");
      }
      conn.setDoOutput(op.getDoOutput());
      conn.connect();
      return conn;
    } catch (IOException e) {
      conn.disconnect();
      throw e;
    }
  }

  /**
   * Two-step Create/Append:
   * Step 1) Submit an HTTP request with neither auto-redirect nor data.
   * Step 2) Submit another HTTP request with data, using the URL from the
   *         Location header of the first response.
   *
   * The two-step create/append prevents clients from sending out data before
   * the redirect. This issue is addressed by the "Expect: 100-continue" header
   * in HTTP/1.1; see RFC 2616, Section 8.2.3. Unfortunately, some software
   * libraries (e.g. the Jetty 6 HTTP server and the Java 6 HTTP client) do not
   * correctly implement "Expect: 100-continue", so the two-step create/append
   * is a temporary workaround for those bugs.
   */
  static HttpURLConnection twoStepWrite(HttpURLConnection conn,
      final HttpOpParam.Op op) throws IOException {
    // Step 1) Submit an HTTP request with neither auto-redirect nor data.
    conn.setInstanceFollowRedirects(false);
    conn.setDoOutput(false);
    conn.connect();
    validateResponse(HttpOpParam.TemporaryRedirectOp.valueOf(op), conn);
    final String redirect = conn.getHeaderField("Location");
    conn.disconnect();

    // Step 2) Submit another HTTP request with data, using the URL from the
    // Location header.
    conn = (HttpURLConnection)new URL(redirect).openConnection();
    conn.setRequestMethod(op.getType().toString());
    return conn;
  }
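  /*
   * Illustrative two-step create exchange (an assumption sketched from the
   * method above; addresses, ports, user and path are placeholders):
   *
   *   Step 1:  PUT http://namenode:50070/webhdfs/v1/foo?op=CREATE&user.name=alice
   *            -> 307 TEMPORARY_REDIRECT with a Location header, typically
   *               pointing at a datanode, e.g.
   *               http://datanode:50075/webhdfs/v1/foo?op=CREATE&...
   *   Step 2:  PUT to the Location URL, this time sending the file data as the
   *            request body (see httpConnect above and write below).
   */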
  /**
   * Run an HTTP operation.
   * Connect to the http server, validate response, and obtain the JSON output.
   *
   * @param op http operation
   * @param fspath file system path
   * @param parameters parameters for the operation
   * @return a JSON object, e.g. Object[], Map<?, ?>, etc.
   * @throws IOException
   */
  private Map<?, ?> run(final HttpOpParam.Op op, final Path fspath,
      final Param<?,?>... parameters) throws IOException {
    final HttpURLConnection conn = httpConnect(op, fspath, parameters);
    try {
      final Map<?, ?> m = validateResponse(op, conn);
      return m != null? m: jsonParse(conn.getInputStream());
    } finally {
      conn.disconnect();
    }
  }

  private FsPermission applyUMask(FsPermission permission) {
    if (permission == null) {
      permission = FsPermission.getDefault();
    }
    return permission.applyUMask(FsPermission.getUMask(getConf()));
  }

  private HdfsFileStatus getHdfsFileStatus(Path f) throws IOException {
    final HttpOpParam.Op op = GetOpParam.Op.GETFILESTATUS;
    final Map<?, ?> json = run(op, f);
    final HdfsFileStatus status = JsonUtil.toFileStatus(json, true);
    if (status == null) {
      throw new FileNotFoundException("File does not exist: " + f);
    }
    return status;
  }

  @Override
  public FileStatus getFileStatus(Path f) throws IOException {
    statistics.incrementReadOps(1);
    return makeQualified(getHdfsFileStatus(f), f);
  }

  private FileStatus makeQualified(HdfsFileStatus f, Path parent) {
    return new FileStatus(f.getLen(), f.isDir(), f.getReplication(),
        f.getBlockSize(), f.getModificationTime(), f.getAccessTime(),
        f.getPermission(), f.getOwner(), f.getGroup(),
        f.isSymlink() ? new Path(f.getSymlink()) : null,
        f.getFullPath(parent).makeQualified(getUri(), getWorkingDirectory()));
  }

  @Override
  public boolean mkdirs(Path f, FsPermission permission) throws IOException {
    statistics.incrementWriteOps(1);
    final HttpOpParam.Op op = PutOpParam.Op.MKDIRS;
    final Map<?, ?> json = run(op, f,
        new PermissionParam(applyUMask(permission)));
    return (Boolean)json.get("boolean");
  }

  /**
   * Create a symlink pointing to the destination path.
   * @see org.apache.hadoop.fs.Hdfs#createSymlink(Path, Path, boolean)
   */
  public void createSymlink(Path destination, Path f, boolean createParent
      ) throws IOException {
    statistics.incrementWriteOps(1);
    final HttpOpParam.Op op = PutOpParam.Op.CREATESYMLINK;
    run(op, f, new DestinationParam(makeQualified(destination).toUri().getPath()),
        new CreateParentParam(createParent));
  }

  @Override
  public boolean rename(final Path src, final Path dst) throws IOException {
    statistics.incrementWriteOps(1);
    final HttpOpParam.Op op = PutOpParam.Op.RENAME;
    final Map<?, ?> json = run(op, src,
        new DestinationParam(makeQualified(dst).toUri().getPath()));
    return (Boolean)json.get("boolean");
  }
  @SuppressWarnings("deprecation")
  @Override
  public void rename(final Path src, final Path dst,
      final Options.Rename... options) throws IOException {
    statistics.incrementWriteOps(1);
    final HttpOpParam.Op op = PutOpParam.Op.RENAME;
    run(op, src, new DestinationParam(makeQualified(dst).toUri().getPath()),
        new RenameOptionSetParam(options));
  }

  @Override
  public void setOwner(final Path p, final String owner, final String group
      ) throws IOException {
    if (owner == null && group == null) {
      throw new IOException("owner == null && group == null");
    }

    statistics.incrementWriteOps(1);
    final HttpOpParam.Op op = PutOpParam.Op.SETOWNER;
    run(op, p, new OwnerParam(owner), new GroupParam(group));
  }

  @Override
  public void setPermission(final Path p, final FsPermission permission
      ) throws IOException {
    statistics.incrementWriteOps(1);
    final HttpOpParam.Op op = PutOpParam.Op.SETPERMISSION;
    run(op, p, new PermissionParam(permission));
  }

  @Override
  public boolean setReplication(final Path p, final short replication
      ) throws IOException {
    statistics.incrementWriteOps(1);
    final HttpOpParam.Op op = PutOpParam.Op.SETREPLICATION;
    final Map<?, ?> json = run(op, p, new ReplicationParam(replication));
    return (Boolean)json.get("boolean");
  }

  @Override
  public void setTimes(final Path p, final long mtime, final long atime
      ) throws IOException {
    statistics.incrementWriteOps(1);
    final HttpOpParam.Op op = PutOpParam.Op.SETTIMES;
    run(op, p, new ModificationTimeParam(mtime), new AccessTimeParam(atime));
  }

  @Override
  public long getDefaultBlockSize() {
    return getConf().getLongBytes(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,
        DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT);
  }

  @Override
  public short getDefaultReplication() {
    return (short)getConf().getInt(DFSConfigKeys.DFS_REPLICATION_KEY,
        DFSConfigKeys.DFS_REPLICATION_DEFAULT);
  }

  FSDataOutputStream write(final HttpOpParam.Op op,
      final HttpURLConnection conn, final int bufferSize) throws IOException {
    return new FSDataOutputStream(new BufferedOutputStream(
        conn.getOutputStream(), bufferSize), statistics) {
      @Override
      public void close() throws IOException {
        try {
          super.close();
        } finally {
          try {
            validateResponse(op, conn);
          } finally {
            conn.disconnect();
          }
        }
      }
    };
  }

  @Override
  public FSDataOutputStream create(final Path f, final FsPermission permission,
      final boolean overwrite, final int bufferSize, final short replication,
      final long blockSize, final Progressable progress) throws IOException {
    statistics.incrementWriteOps(1);

    final HttpOpParam.Op op = PutOpParam.Op.CREATE;
    final HttpURLConnection conn = httpConnect(op, f,
        new PermissionParam(applyUMask(permission)),
        new OverwriteParam(overwrite),
        new BufferSizeParam(bufferSize),
        new ReplicationParam(replication),
        new BlockSizeParam(blockSize));
    return write(op, conn, bufferSize);
  }

  @Override
  public FSDataOutputStream append(final Path f, final int bufferSize,
      final Progressable progress) throws IOException {
    statistics.incrementWriteOps(1);

    final HttpOpParam.Op op = PostOpParam.Op.APPEND;
    final HttpURLConnection conn = httpConnect(op, f,
        new BufferSizeParam(bufferSize));
    return write(op, conn, bufferSize);
  }

  @SuppressWarnings("deprecation")
  @Override
  public boolean delete(final Path f) throws IOException {
    return delete(f, true);
  }
  @Override
  public boolean delete(Path f, boolean recursive) throws IOException {
    final HttpOpParam.Op op = DeleteOpParam.Op.DELETE;
    final Map<?, ?> json = run(op, f, new RecursiveParam(recursive));
    return (Boolean)json.get("boolean");
  }

  @Override
  public FSDataInputStream open(final Path f, final int buffersize
      ) throws IOException {
    statistics.incrementReadOps(1);
    final HttpOpParam.Op op = GetOpParam.Op.OPEN;
    final URL url = toUrl(op, f, new BufferSizeParam(buffersize));
    return new FSDataInputStream(new OffsetUrlInputStream(
        new OffsetUrlOpener(url), new OffsetUrlOpener(null)));
  }

  class OffsetUrlOpener extends ByteRangeInputStream.URLOpener {
    /** The url with offset parameter */
    private URL offsetUrl;

    OffsetUrlOpener(final URL url) {
      super(url);
    }

    /** Open connection with offset url. */
    @Override
    protected HttpURLConnection openConnection() throws IOException {
      return getHttpUrlConnection(offsetUrl);
    }

    /** Setup offset url before open connection. */
    @Override
    protected HttpURLConnection openConnection(final long offset) throws IOException {
      offsetUrl = offset == 0L? url: new URL(url + "&" + new OffsetParam(offset));
      final HttpURLConnection conn = openConnection();
      conn.setRequestMethod("GET");
      return conn;
    }
  }

  private static final String OFFSET_PARAM_PREFIX = OffsetParam.NAME + "=";

  /** Remove offset parameter, if there is any, from the url */
  static URL removeOffsetParam(final URL url) throws MalformedURLException {
    String query = url.getQuery();
    if (query == null) {
      return url;
    }
    final String lower = query.toLowerCase();
    if (!lower.startsWith(OFFSET_PARAM_PREFIX)
        && !lower.contains("&" + OFFSET_PARAM_PREFIX)) {
      return url;
    }

    // rebuild query
    StringBuilder b = null;
    for(final StringTokenizer st = new StringTokenizer(query, "&");
        st.hasMoreTokens();) {
      final String token = st.nextToken();
      if (!token.toLowerCase().startsWith(OFFSET_PARAM_PREFIX)) {
        if (b == null) {
          b = new StringBuilder("?").append(token);
        } else {
          b.append('&').append(token);
        }
      }
    }
    query = b == null? "": b.toString();

    final String urlStr = url.toString();
    return new URL(urlStr.substring(0, urlStr.indexOf('?')) + query);
  }
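  /*
   * Illustrative example (not from the original source; host, port and path
   * are placeholders): removeOffsetParam above strips only the offset
   * parameter, e.g.
   *
   *   http://namenode:50070/webhdfs/v1/foo?offset=1024&length=512
   *     -> http://namenode:50070/webhdfs/v1/foo?length=512
   */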
  static class OffsetUrlInputStream extends ByteRangeInputStream {
    OffsetUrlInputStream(OffsetUrlOpener o, OffsetUrlOpener r) {
      super(o, r);
    }

    @Override
    protected void checkResponseCode(final HttpURLConnection connection
        ) throws IOException {
      validateResponse(GetOpParam.Op.OPEN, connection);
    }

    /** Remove offset parameter before returning the resolved url. */
    @Override
    protected URL getResolvedUrl(final HttpURLConnection connection
        ) throws MalformedURLException {
      return removeOffsetParam(connection.getURL());
    }
  }
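  /*
   * Illustrative LISTSTATUS response (field names follow the WebHDFS JSON
   * format; values are placeholders), as consumed by listStatus below: a
   * "FileStatuses" object wrapping a "FileStatus" array, roughly
   *
   *   {"FileStatuses":{"FileStatus":[
   *     {"pathSuffix":"a.txt","type":"FILE","length":24930, ...},
   *     {"pathSuffix":"dir","type":"DIRECTORY", ...}
   *   ]}}
   */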
  @Override
  public FileStatus[] listStatus(final Path f) throws IOException {
    statistics.incrementReadOps(1);

    final HttpOpParam.Op op = GetOpParam.Op.LISTSTATUS;
    final Map<?, ?> json = run(op, f);
    final Map<?, ?> rootmap = (Map<?, ?>)json.get(FileStatus.class.getSimpleName() + "es");
    final Object[] array = (Object[])rootmap.get(FileStatus.class.getSimpleName());

    // convert FileStatus
    final FileStatus[] statuses = new FileStatus[array.length];
    for(int i = 0; i < array.length; i++) {
      final Map<?, ?> m = (Map<?, ?>)array[i];
      statuses[i] = makeQualified(JsonUtil.toFileStatus(m, false), f);
    }
    return statuses;
  }

  @SuppressWarnings("deprecation")
  @Override
  public Token<DelegationTokenIdentifier> getDelegationToken(final String renewer
      ) throws IOException {
    final HttpOpParam.Op op = GetOpParam.Op.GETDELEGATIONTOKEN;
    final Map<?, ?> m = run(op, null, new RenewerParam(renewer));
    final Token<DelegationTokenIdentifier> token = JsonUtil.toDelegationToken(m);
    SecurityUtil.setTokenService(token, nnAddr);
    return token;
  }

  @Override
  public List<Token<?>> getDelegationTokens(final String renewer
      ) throws IOException {
    final HttpOpParam.Op op = GetOpParam.Op.GETDELEGATIONTOKENS;
    final Map<?, ?> m = run(op, null, new RenewerParam(renewer));
    final List<Token<?>> tokens = JsonUtil.toTokenList(m);
    for(Token<?> t : tokens) {
      SecurityUtil.setTokenService(t, nnAddr);
    }
    return tokens;
  }

  @Override
  public Token<?> getRenewToken() {
    return delegationToken;
  }

  @Override
  public <T extends TokenIdentifier> void setDelegationToken(
      final Token<T> token) {
    synchronized(this) {
      delegationToken = token;
    }
  }

  private synchronized long renewDelegationToken(final Token<?> token
      ) throws IOException {
    final HttpOpParam.Op op = PutOpParam.Op.RENEWDELEGATIONTOKEN;
    TokenArgumentParam dtargParam = new TokenArgumentParam(
        token.encodeToUrlString());
    final Map<?, ?> m = run(op, null, dtargParam);
    return (Long) m.get("long");
  }

  private synchronized void cancelDelegationToken(final Token<?> token
      ) throws IOException {
    final HttpOpParam.Op op = PutOpParam.Op.CANCELDELEGATIONTOKEN;
    TokenArgumentParam dtargParam = new TokenArgumentParam(
        token.encodeToUrlString());
    run(op, null, dtargParam);
  }

  @Override
  public BlockLocation[] getFileBlockLocations(final FileStatus status,
      final long offset, final long length) throws IOException {
    if (status == null) {
      return null;
    }
    return getFileBlockLocations(status.getPath(), offset, length);
  }

  @Override
  public BlockLocation[] getFileBlockLocations(final Path p,
      final long offset, final long length) throws IOException {
    statistics.incrementReadOps(1);

    final HttpOpParam.Op op = GetOpParam.Op.GET_BLOCK_LOCATIONS;
    final Map<?, ?> m = run(op, p, new OffsetParam(offset),
        new LengthParam(length));
    return DFSUtil.locatedBlocks2Locations(JsonUtil.toLocatedBlocks(m));
  }

  @Override
  public ContentSummary getContentSummary(final Path p) throws IOException {
    statistics.incrementReadOps(1);

    final HttpOpParam.Op op = GetOpParam.Op.GETCONTENTSUMMARY;
    final Map<?, ?> m = run(op, p);
    return JsonUtil.toContentSummary(m);
  }

  @Override
  public MD5MD5CRC32FileChecksum getFileChecksum(final Path p
      ) throws IOException {
    statistics.incrementReadOps(1);

    final HttpOpParam.Op op = GetOpParam.Op.GETFILECHECKSUM;
    final Map<?, ?> m = run(op, p);
    return JsonUtil.toMD5MD5CRC32FileChecksum(m);
  }

  /** Delegation token renewer. */
  public static class DtRenewer extends TokenRenewer {
    @Override
    public boolean handleKind(Text kind) {
      return kind.equals(TOKEN_KIND);
    }

    @Override
    public boolean isManaged(Token<?> token) throws IOException {
      return true;
    }

    private static WebHdfsFileSystem getWebHdfs(
        final Token<?> token, final Configuration conf
        ) throws IOException, InterruptedException, URISyntaxException {

      final InetSocketAddress nnAddr = SecurityUtil.getTokenServiceAddr(token);
      final URI uri = DFSUtil.createUri(WebHdfsFileSystem.SCHEME, nnAddr);
      return (WebHdfsFileSystem)FileSystem.get(uri, conf);
    }

    @Override
    public long renew(final Token<?> token, final Configuration conf
        ) throws IOException, InterruptedException {
      final UserGroupInformation ugi = UserGroupInformation.getLoginUser();
      // update the kerberos credentials, if they are coming from a keytab
      ugi.reloginFromKeytab();

      try {
        WebHdfsFileSystem webhdfs = getWebHdfs(token, conf);
        return webhdfs.renewDelegationToken(token);
      } catch (URISyntaxException e) {
        throw new IOException(e);
      }
    }

    @Override
    public void cancel(final Token<?> token, final Configuration conf
        ) throws IOException, InterruptedException {
      final UserGroupInformation ugi = UserGroupInformation.getLoginUser();
      // update the kerberos credentials, if they are coming from a keytab
      ugi.checkTGTAndReloginFromKeytab();

      try {
        final WebHdfsFileSystem webhdfs = getWebHdfs(token, conf);
        webhdfs.cancelDelegationToken(token);
      } catch (URISyntaxException e) {
        throw new IOException(e);
      }
    }
  }

  private static class WebHdfsDelegationTokenSelector
      extends AbstractDelegationTokenSelector<DelegationTokenIdentifier> {
    private static final DelegationTokenSelector hdfsTokenSelector =
        new DelegationTokenSelector();

    public WebHdfsDelegationTokenSelector() {
      super(TOKEN_KIND);
    }

    Token<DelegationTokenIdentifier> selectToken(URI nnUri,
        Collection<Token<?>> tokens, Configuration conf) {
      Token<DelegationTokenIdentifier> token =
          selectToken(SecurityUtil.buildTokenService(nnUri), tokens);
      if (token == null) {
        token = hdfsTokenSelector.selectToken(nnUri, tokens, conf);
      }
      return token;
    }
  }
}