/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hdfs.web;

import java.io.BufferedOutputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.InetSocketAddress;
import java.net.MalformedURLException;
import java.net.URI;
import java.net.URISyntaxException;
import java.net.URL;
import java.util.List;
import java.util.Map;
import java.util.StringTokenizer;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileAlreadyExistsException;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.MD5MD5CRC32FileChecksum;
import org.apache.hadoop.fs.Options;
import org.apache.hadoop.fs.ParentNotDirectoryException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.ByteRangeInputStream;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException;
import org.apache.hadoop.hdfs.protocol.UnresolvedPathException;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenRenewer;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSelector;
import org.apache.hadoop.hdfs.server.common.JspHelper;
import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
import org.apache.hadoop.hdfs.web.resources.AccessTimeParam;
import org.apache.hadoop.hdfs.web.resources.BlockSizeParam;
import org.apache.hadoop.hdfs.web.resources.BufferSizeParam;
import org.apache.hadoop.hdfs.web.resources.CreateParentParam;
import org.apache.hadoop.hdfs.web.resources.DeleteOpParam;
import org.apache.hadoop.hdfs.web.resources.DestinationParam;
import org.apache.hadoop.hdfs.web.resources.GetOpParam;
import org.apache.hadoop.hdfs.web.resources.GroupParam;
import org.apache.hadoop.hdfs.web.resources.HttpOpParam;
import org.apache.hadoop.hdfs.web.resources.LengthParam;
import org.apache.hadoop.hdfs.web.resources.ModificationTimeParam;
import org.apache.hadoop.hdfs.web.resources.OffsetParam;
import org.apache.hadoop.hdfs.web.resources.OverwriteParam;
import org.apache.hadoop.hdfs.web.resources.OwnerParam;
import org.apache.hadoop.hdfs.web.resources.Param;
import org.apache.hadoop.hdfs.web.resources.PermissionParam;
import org.apache.hadoop.hdfs.web.resources.PostOpParam;
import org.apache.hadoop.hdfs.web.resources.PutOpParam;
import org.apache.hadoop.hdfs.web.resources.RecursiveParam;
import org.apache.hadoop.hdfs.web.resources.RenameOptionSetParam;
import org.apache.hadoop.hdfs.web.resources.RenewerParam;
import org.apache.hadoop.hdfs.web.resources.ReplicationParam;
import org.apache.hadoop.hdfs.web.resources.TokenArgumentParam;
import org.apache.hadoop.hdfs.web.resources.UserParam;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
import org.apache.hadoop.security.authentication.client.AuthenticationException;
import org.apache.hadoop.security.authorize.AuthorizationException;
import org.apache.hadoop.security.token.SecretManager.InvalidToken;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;
import org.apache.hadoop.security.token.TokenRenewer;
import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSelector;
import org.apache.hadoop.util.Progressable;
import org.mortbay.util.ajax.JSON;

/** A FileSystem for HDFS over the web. */
public class WebHdfsFileSystem extends FileSystem
    implements DelegationTokenRenewer.Renewable {
  public static final Log LOG = LogFactory.getLog(WebHdfsFileSystem.class);
  /** File System URI: {SCHEME}://namenode:port/path/to/file */
  public static final String SCHEME = "webhdfs";
  /** WebHdfs version. */
  public static final int VERSION = 1;
  /** Http URI: http://namenode:port/{PATH_PREFIX}/path/to/file */
  public static final String PATH_PREFIX = "/" + SCHEME + "/v" + VERSION;

  /** SPNEGO authenticator */
  private static final KerberosUgiAuthenticator AUTH = new KerberosUgiAuthenticator();
  /** Delegation token kind */
  public static final Text TOKEN_KIND = new Text("WEBHDFS delegation");
  /** Token selector */
  public static final AbstractDelegationTokenSelector<DelegationTokenIdentifier> DT_SELECTOR
      = new AbstractDelegationTokenSelector<DelegationTokenIdentifier>(TOKEN_KIND) {};

  private static DelegationTokenRenewer<WebHdfsFileSystem> DT_RENEWER = null;

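  /**
   * Add the given filesystem to the shared delegation token renewer, lazily
   * creating and starting the renewer thread on first use.
   */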
  private static synchronized void addRenewAction(final WebHdfsFileSystem webhdfs) {
    if (DT_RENEWER == null) {
      DT_RENEWER = new DelegationTokenRenewer<WebHdfsFileSystem>(WebHdfsFileSystem.class);
      DT_RENEWER.start();
    }

    DT_RENEWER.addRenewAction(webhdfs);
  }

  /** Is WebHDFS enabled in conf? */
  public static boolean isEnabled(final Configuration conf, final Log log) {
    final boolean b = conf.getBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY,
        DFSConfigKeys.DFS_WEBHDFS_ENABLED_DEFAULT);
    log.info(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY + " = " + b);
    return b;
  }

  private final UserGroupInformation ugi;
  private InetSocketAddress nnAddr;
  private URI uri;
  private Token<?> delegationToken;
  private final AuthenticatedURL.Token authToken = new AuthenticatedURL.Token();
  private Path workingDir;

  {
    try {
      ugi = UserGroupInformation.getCurrentUser();
    } catch (IOException e) {
      throw new RuntimeException(e);
    }
  }

  @Override
  public synchronized void initialize(URI uri, Configuration conf
      ) throws IOException {
    super.initialize(uri, conf);
    setConf(conf);
    try {
      this.uri = new URI(uri.getScheme(), uri.getAuthority(), null, null, null);
    } catch (URISyntaxException e) {
      throw new IllegalArgumentException(e);
    }
    this.nnAddr = NetUtils.createSocketAddr(uri.toString());
    this.workingDir = getHomeDirectory();

    if (UserGroupInformation.isSecurityEnabled()) {
      initDelegationToken();
    }
  }

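  /**
   * Initialize the delegation token for this filesystem: reuse an existing
   * webhdfs or hdfs token from the user's credentials if available, otherwise
   * fetch a new token from the namenode and schedule it for renewal.
   */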
  protected void initDelegationToken() throws IOException {
    // look for webhdfs token, then try hdfs
    final Text serviceName = SecurityUtil.buildTokenService(nnAddr);
    Token<?> token = DT_SELECTOR.selectToken(serviceName, ugi.getTokens());
    if (token == null) {
      token = DelegationTokenSelector.selectHdfsDelegationToken(
          nnAddr, ugi, getConf());
    }

    // if we don't already have a token, go get one
    boolean createdToken = false;
    if (token == null) {
      token = getDelegationToken(null);
      createdToken = (token != null);
    }

    // security might be disabled
    if (token != null) {
      setDelegationToken(token);
      if (createdToken) {
        addRenewAction(this);
        LOG.debug("Created new DT for " + token.getService());
      } else {
        LOG.debug("Found existing DT for " + token.getService());
      }
    }
  }

  @Override
  protected int getDefaultPort() {
    return getConf().getInt(DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_KEY,
        DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_DEFAULT);
  }

  @Override
  public URI getUri() {
    return this.uri;
  }

  /** @return the home directory. */
  public static String getHomeDirectoryString(final UserGroupInformation ugi) {
    return "/user/" + ugi.getShortUserName();
  }

  @Override
  public Path getHomeDirectory() {
    return makeQualified(new Path(getHomeDirectoryString(ugi)));
  }

  @Override
  public synchronized Path getWorkingDirectory() {
    return workingDir;
  }

  @Override
  public synchronized void setWorkingDirectory(final Path dir) {
    String result = makeAbsolute(dir).toUri().getPath();
    if (!DFSUtil.isValidName(result)) {
      throw new IllegalArgumentException("Invalid DFS directory name " +
                                         result);
    }
    workingDir = makeAbsolute(dir);
  }

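  /** Resolve the given path against the working directory if it is not absolute. */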
  private Path makeAbsolute(Path f) {
    return f.isAbsolute()? f: new Path(workingDir, f);
  }

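  /** Parse the given input stream as a JSON map. */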
  static Map<?, ?> jsonParse(final InputStream in) throws IOException {
    if (in == null) {
      throw new IOException("The input stream is null.");
    }
    return (Map<?, ?>)JSON.parse(new InputStreamReader(in));
  }

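  /**
   * Check that the HTTP response code matches the code expected by the
   * operation. On a mismatch, parse the JSON error response and throw the
   * unwrapped RemoteException if one is present; otherwise return the parsed
   * error map. Returns null when the response code is the expected one.
   */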
  private static Map<?, ?> validateResponse(final HttpOpParam.Op op,
      final HttpURLConnection conn) throws IOException {
    final int code = conn.getResponseCode();
    if (code != op.getExpectedHttpResponseCode()) {
      final Map<?, ?> m;
      try {
        m = jsonParse(conn.getErrorStream());
      } catch(IOException e) {
        throw new IOException("Unexpected HTTP response: code=" + code + " != "
            + op.getExpectedHttpResponseCode() + ", " + op.toQueryString()
            + ", message=" + conn.getResponseMessage(), e);
      }

      if (m.get(RemoteException.class.getSimpleName()) == null) {
        return m;
      }

      final RemoteException re = JsonUtil.toRemoteException(m);
      throw re.unwrapRemoteException(AccessControlException.class,
          InvalidToken.class,
          AuthenticationException.class,
          AuthorizationException.class,
          FileAlreadyExistsException.class,
          FileNotFoundException.class,
          ParentNotDirectoryException.class,
          UnresolvedPathException.class,
          SafeModeException.class,
          DSQuotaExceededException.class,
          NSQuotaExceededException.class);
    }
    return null;
  }

  /**
   * Return a URL pointing to the given path on the namenode.
   *
   * @param path the path to obtain the URL for
   * @param query the query string to append to the path
   * @return namenode URL referring to the given path
   * @throws IOException on error constructing the URL
   */
  private URL getNamenodeURL(String path, String query) throws IOException {
    final URL url = new URL("http", nnAddr.getHostName(),
          nnAddr.getPort(), path + '?' + query);
    if (LOG.isTraceEnabled()) {
      LOG.trace("url=" + url);
    }
    return url;
  }

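  /**
   * Append the delegation token to the request query string, if security is
   * enabled and a delegation token is available.
   */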
  private String addDt2Query(String query) throws IOException {
    if (UserGroupInformation.isSecurityEnabled()) {
      synchronized (this) {
        if (delegationToken != null) {
          final String encoded = delegationToken.encodeToUrlString();
          return query + JspHelper.getDelegationTokenUrlParam(encoded);
        } // else we are talking to an insecure cluster
      }
    }
    return query;
  }

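  /**
   * Build the full request URL for the given operation, path and parameters.
   * The delegation token parameter is omitted for operations that fetch or
   * renew delegation tokens, since those must authenticate with Kerberos.
   */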
  URL toUrl(final HttpOpParam.Op op, final Path fspath,
      final Param<?,?>... parameters) throws IOException {
    //initialize URI path and query
    final String path = PATH_PREFIX
        + (fspath == null? "/": makeQualified(fspath).toUri().getPath());
    final String query = op.toQueryString()
        + '&' + new UserParam(ugi)
        + Param.toSortedString("&", parameters);
    final URL url;
    if (op == PutOpParam.Op.RENEWDELEGATIONTOKEN
        || op == GetOpParam.Op.GETDELEGATIONTOKEN
        || op == GetOpParam.Op.GETDELEGATIONTOKENS) {
      // Skip adding delegation token for getting or renewing delegation token,
      // because these operations require kerberos authentication.
      url = getNamenodeURL(path, query);
    } else {
      url = getNamenodeURL(path, addDt2Query(query));
    }
    if (LOG.isTraceEnabled()) {
      LOG.trace("url=" + url);
    }
    return url;
  }

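  /**
   * Open an HTTP connection to the given URL, going through the SPNEGO
   * authenticator when the current user has Kerberos credentials.
   */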
  private HttpURLConnection getHttpUrlConnection(URL url)
      throws IOException {
    final HttpURLConnection conn;
    try {
      if (ugi.hasKerberosCredentials()) {
        conn = new AuthenticatedURL(AUTH).openConnection(url, authToken);
      } else {
        conn = (HttpURLConnection)url.openConnection();
      }
    } catch (AuthenticationException e) {
      throw new IOException("Authentication failed, url=" + url, e);
    }
    return conn;
  }

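  /**
   * Build the request URL, open the connection, set the request method and,
   * for operations that send data, perform the two-step write before
   * connecting.
   */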
  private HttpURLConnection httpConnect(final HttpOpParam.Op op, final Path fspath,
      final Param<?,?>... parameters) throws IOException {
    final URL url = toUrl(op, fspath, parameters);

    //connect and get response
    HttpURLConnection conn = getHttpUrlConnection(url);
    try {
      conn.setRequestMethod(op.getType().toString());
      if (op.getDoOutput()) {
        conn = twoStepWrite(conn, op);
        conn.setRequestProperty("Content-Type", "application/octet-stream");
      }
      conn.setDoOutput(op.getDoOutput());
      conn.connect();
      return conn;
    } catch (IOException e) {
      conn.disconnect();
      throw e;
    }
  }

  /**
   * Two-step Create/Append:
   * Step 1) Submit an HTTP request with neither auto-redirect nor data.
   * Step 2) Submit another HTTP request with data, using the URL from the Location header.
   *
   * The reason for the two-step create/append is to prevent clients from
   * sending out data before the redirect. This issue is addressed by the
   * "Expect: 100-continue" header in HTTP/1.1; see RFC 2616, Section 8.2.3.
   * Unfortunately, there are software library bugs (e.g. the Jetty 6 HTTP server
   * and the Java 6 HTTP client) which do not correctly implement "Expect:
   * 100-continue". The two-step create/append is a temporary workaround for
   * these library bugs.
   */
  static HttpURLConnection twoStepWrite(HttpURLConnection conn,
      final HttpOpParam.Op op) throws IOException {
    //Step 1) Submit an HTTP request with neither auto-redirect nor data.
    conn.setInstanceFollowRedirects(false);
    conn.setDoOutput(false);
    conn.connect();
    validateResponse(HttpOpParam.TemporaryRedirectOp.valueOf(op), conn);
    final String redirect = conn.getHeaderField("Location");
    conn.disconnect();

    //Step 2) Submit another HTTP request with data, using the URL from the Location header.
    conn = (HttpURLConnection)new URL(redirect).openConnection();
    conn.setRequestMethod(op.getType().toString());
    return conn;
  }

  /**
   * Run an HTTP operation.
   * Connect to the HTTP server, validate the response, and obtain the JSON output.
   *
   * @param op HTTP operation
   * @param fspath file system path
   * @param parameters parameters for the operation
   * @return a JSON object, e.g. Object[], Map<?, ?>, etc.
   * @throws IOException if the connection fails or the response is invalid
   */
  private Map<?, ?> run(final HttpOpParam.Op op, final Path fspath,
      final Param<?,?>... parameters) throws IOException {
    final HttpURLConnection conn = httpConnect(op, fspath, parameters);
    try {
      final Map<?, ?> m = validateResponse(op, conn);
      return m != null? m: jsonParse(conn.getInputStream());
    } finally {
      conn.disconnect();
    }
  }

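  /**
   * Apply the umask from the configuration to the given permission, falling
   * back to the default permission when none is given.
   */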
  private FsPermission applyUMask(FsPermission permission) {
    if (permission == null) {
      permission = FsPermission.getDefault();
    }
    return permission.applyUMask(FsPermission.getUMask(getConf()));
  }

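  /**
   * Get the HdfsFileStatus of the given path via the GETFILESTATUS operation,
   * throwing FileNotFoundException if the path does not exist.
   */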
  private HdfsFileStatus getHdfsFileStatus(Path f) throws IOException {
    final HttpOpParam.Op op = GetOpParam.Op.GETFILESTATUS;
    final Map<?, ?> json = run(op, f);
    final HdfsFileStatus status = JsonUtil.toFileStatus(json, true);
    if (status == null) {
      throw new FileNotFoundException("File does not exist: " + f);
    }
    return status;
  }

  @Override
  public FileStatus getFileStatus(Path f) throws IOException {
    statistics.incrementReadOps(1);
    return makeQualified(getHdfsFileStatus(f), f);
  }

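  /**
   * Convert an HdfsFileStatus to a FileStatus, qualifying the path against
   * this filesystem's URI and working directory.
   */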
  private FileStatus makeQualified(HdfsFileStatus f, Path parent) {
    return new FileStatus(f.getLen(), f.isDir(), f.getReplication(),
        f.getBlockSize(), f.getModificationTime(), f.getAccessTime(),
        f.getPermission(), f.getOwner(), f.getGroup(),
        f.isSymlink() ? new Path(f.getSymlink()) : null,
        f.getFullPath(parent).makeQualified(getUri(), getWorkingDirectory()));
  }

  @Override
  public boolean mkdirs(Path f, FsPermission permission) throws IOException {
    statistics.incrementWriteOps(1);
    final HttpOpParam.Op op = PutOpParam.Op.MKDIRS;
    final Map<?, ?> json = run(op, f,
        new PermissionParam(applyUMask(permission)));
    return (Boolean)json.get("boolean");
  }

  /**
   * Create a symlink pointing to the destination path.
   * @see org.apache.hadoop.fs.Hdfs#createSymlink(Path, Path, boolean)
   */
  public void createSymlink(Path destination, Path f, boolean createParent
      ) throws IOException {
    statistics.incrementWriteOps(1);
    final HttpOpParam.Op op = PutOpParam.Op.CREATESYMLINK;
    run(op, f, new DestinationParam(makeQualified(destination).toUri().getPath()),
        new CreateParentParam(createParent));
  }

  @Override
  public boolean rename(final Path src, final Path dst) throws IOException {
    statistics.incrementWriteOps(1);
    final HttpOpParam.Op op = PutOpParam.Op.RENAME;
    final Map<?, ?> json = run(op, src,
        new DestinationParam(makeQualified(dst).toUri().getPath()));
    return (Boolean)json.get("boolean");
  }

  @SuppressWarnings("deprecation")
  @Override
  public void rename(final Path src, final Path dst,
      final Options.Rename... options) throws IOException {
    statistics.incrementWriteOps(1);
    final HttpOpParam.Op op = PutOpParam.Op.RENAME;
    run(op, src, new DestinationParam(makeQualified(dst).toUri().getPath()),
        new RenameOptionSetParam(options));
  }

  @Override
  public void setOwner(final Path p, final String owner, final String group
      ) throws IOException {
    if (owner == null && group == null) {
      throw new IOException("owner == null && group == null");
    }

    statistics.incrementWriteOps(1);
    final HttpOpParam.Op op = PutOpParam.Op.SETOWNER;
    run(op, p, new OwnerParam(owner), new GroupParam(group));
  }

  @Override
  public void setPermission(final Path p, final FsPermission permission
      ) throws IOException {
    statistics.incrementWriteOps(1);
    final HttpOpParam.Op op = PutOpParam.Op.SETPERMISSION;
    run(op, p, new PermissionParam(permission));
  }

  @Override
  public boolean setReplication(final Path p, final short replication
     ) throws IOException {
    statistics.incrementWriteOps(1);
    final HttpOpParam.Op op = PutOpParam.Op.SETREPLICATION;
    final Map<?, ?> json = run(op, p, new ReplicationParam(replication));
    return (Boolean)json.get("boolean");
  }

  @Override
  public void setTimes(final Path p, final long mtime, final long atime
      ) throws IOException {
    statistics.incrementWriteOps(1);
    final HttpOpParam.Op op = PutOpParam.Op.SETTIMES;
    run(op, p, new ModificationTimeParam(mtime), new AccessTimeParam(atime));
  }

  @Override
  public long getDefaultBlockSize() {
    return getConf().getLongBytes(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,
        DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT);
  }

  @Override
  public short getDefaultReplication() {
    return (short)getConf().getInt(DFSConfigKeys.DFS_REPLICATION_KEY,
        DFSConfigKeys.DFS_REPLICATION_DEFAULT);
  }

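  /**
   * Wrap the connection's output stream in an FSDataOutputStream that, on
   * close, validates the HTTP response and disconnects.
   */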
  FSDataOutputStream write(final HttpOpParam.Op op,
      final HttpURLConnection conn, final int bufferSize) throws IOException {
    return new FSDataOutputStream(new BufferedOutputStream(
        conn.getOutputStream(), bufferSize), statistics) {
      @Override
      public void close() throws IOException {
        try {
          super.close();
        } finally {
          try {
            validateResponse(op, conn);
          } finally {
            conn.disconnect();
          }
        }
      }
    };
  }

  @Override
  public FSDataOutputStream create(final Path f, final FsPermission permission,
      final boolean overwrite, final int bufferSize, final short replication,
      final long blockSize, final Progressable progress) throws IOException {
    statistics.incrementWriteOps(1);

    final HttpOpParam.Op op = PutOpParam.Op.CREATE;
    final HttpURLConnection conn = httpConnect(op, f,
        new PermissionParam(applyUMask(permission)),
        new OverwriteParam(overwrite),
        new BufferSizeParam(bufferSize),
        new ReplicationParam(replication),
        new BlockSizeParam(blockSize));
    return write(op, conn, bufferSize);
  }

  @Override
  public FSDataOutputStream append(final Path f, final int bufferSize,
      final Progressable progress) throws IOException {
    statistics.incrementWriteOps(1);

    final HttpOpParam.Op op = PostOpParam.Op.APPEND;
    final HttpURLConnection conn = httpConnect(op, f,
        new BufferSizeParam(bufferSize));
    return write(op, conn, bufferSize);
  }

  @SuppressWarnings("deprecation")
  @Override
  public boolean delete(final Path f) throws IOException {
    return delete(f, true);
  }

  @Override
  public boolean delete(Path f, boolean recursive) throws IOException {
    final HttpOpParam.Op op = DeleteOpParam.Op.DELETE;
    final Map<?, ?> json = run(op, f, new RecursiveParam(recursive));
    return (Boolean)json.get("boolean");
  }

  @Override
  public FSDataInputStream open(final Path f, final int buffersize
      ) throws IOException {
    statistics.incrementReadOps(1);
    final HttpOpParam.Op op = GetOpParam.Op.OPEN;
    final URL url = toUrl(op, f, new BufferSizeParam(buffersize));
    return new FSDataInputStream(new OffsetUrlInputStream(
        new OffsetUrlOpener(url), new OffsetUrlOpener(null)));
  }

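  /** A URLOpener that appends an offset parameter to the url before opening a connection. */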
  class OffsetUrlOpener extends ByteRangeInputStream.URLOpener {
    /** The url with offset parameter */
    private URL offsetUrl;

    OffsetUrlOpener(final URL url) {
      super(url);
    }

    /** Open a connection to the offset url. */
    @Override
    protected HttpURLConnection openConnection() throws IOException {
      return getHttpUrlConnection(offsetUrl);
    }

    /** Set up the offset url before opening the connection. */
    @Override
    protected HttpURLConnection openConnection(final long offset) throws IOException {
      offsetUrl = offset == 0L? url: new URL(url + "&" + new OffsetParam(offset));
      final HttpURLConnection conn = openConnection();
      conn.setRequestMethod("GET");
      return conn;
    }
  }

  private static final String OFFSET_PARAM_PREFIX = OffsetParam.NAME + "=";

  /** Remove offset parameter, if there is any, from the url */
  static URL removeOffsetParam(final URL url) throws MalformedURLException {
    String query = url.getQuery();
    if (query == null) {
      return url;
    }
    final String lower = query.toLowerCase();
    if (!lower.startsWith(OFFSET_PARAM_PREFIX)
        && !lower.contains("&" + OFFSET_PARAM_PREFIX)) {
      return url;
    }

    //rebuild query
    StringBuilder b = null;
    for(final StringTokenizer st = new StringTokenizer(query, "&");
        st.hasMoreTokens();) {
      final String token = st.nextToken();
      if (!token.toLowerCase().startsWith(OFFSET_PARAM_PREFIX)) {
        if (b == null) {
          b = new StringBuilder("?").append(token);
        } else {
          b.append('&').append(token);
        }
      }
    }
    query = b == null? "": b.toString();

    final String urlStr = url.toString();
    return new URL(urlStr.substring(0, urlStr.indexOf('?')) + query);
  }

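  /**
   * A ByteRangeInputStream that validates responses against the OPEN
   * operation and strips the offset parameter from resolved urls.
   */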
  static class OffsetUrlInputStream extends ByteRangeInputStream {
    OffsetUrlInputStream(OffsetUrlOpener o, OffsetUrlOpener r) {
      super(o, r);
    }

    @Override
    protected void checkResponseCode(final HttpURLConnection connection
        ) throws IOException {
      validateResponse(GetOpParam.Op.OPEN, connection);
    }

    /** Remove offset parameter before returning the resolved url. */
    @Override
    protected URL getResolvedUrl(final HttpURLConnection connection
        ) throws MalformedURLException {
      return removeOffsetParam(connection.getURL());
    }
  }

  @Override
  public FileStatus[] listStatus(final Path f) throws IOException {
    statistics.incrementReadOps(1);

    final HttpOpParam.Op op = GetOpParam.Op.LISTSTATUS;
    final Map<?, ?> json = run(op, f);
    final Map<?, ?> rootmap = (Map<?, ?>)json.get(FileStatus.class.getSimpleName() + "es");
    final Object[] array = (Object[])rootmap.get(FileStatus.class.getSimpleName());

    //convert FileStatus
    final FileStatus[] statuses = new FileStatus[array.length];
    for(int i = 0; i < array.length; i++) {
      final Map<?, ?> m = (Map<?, ?>)array[i];
      statuses[i] = makeQualified(JsonUtil.toFileStatus(m, false), f);
    }
    return statuses;
  }

  @SuppressWarnings("deprecation")
  @Override
  public Token<DelegationTokenIdentifier> getDelegationToken(final String renewer
      ) throws IOException {
    final HttpOpParam.Op op = GetOpParam.Op.GETDELEGATIONTOKEN;
    final Map<?, ?> m = run(op, null, new RenewerParam(renewer));
    final Token<DelegationTokenIdentifier> token = JsonUtil.toDelegationToken(m);
    SecurityUtil.setTokenService(token, nnAddr);
    return token;
  }

  @Override
  public List<Token<?>> getDelegationTokens(final String renewer
      ) throws IOException {
    final HttpOpParam.Op op = GetOpParam.Op.GETDELEGATIONTOKENS;
    final Map<?, ?> m = run(op, null, new RenewerParam(renewer));
    final List<Token<?>> tokens = JsonUtil.toTokenList(m);
    for(Token<?> t : tokens) {
      SecurityUtil.setTokenService(t, nnAddr);
    }
    return tokens;
  }

  @Override
  public Token<?> getRenewToken() {
    return delegationToken;
  }

  @Override
  public <T extends TokenIdentifier> void setDelegationToken(
      final Token<T> token) {
    synchronized(this) {
      delegationToken = token;
    }
  }

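  /**
   * Renew the given delegation token via the RENEWDELEGATIONTOKEN operation
   * and return the new expiration time.
   */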
  private synchronized long renewDelegationToken(final Token<?> token
      ) throws IOException {
    final HttpOpParam.Op op = PutOpParam.Op.RENEWDELEGATIONTOKEN;
    TokenArgumentParam dtargParam = new TokenArgumentParam(
        token.encodeToUrlString());
    final Map<?, ?> m = run(op, null, dtargParam);
    return (Long) m.get("long");
  }

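  /** Cancel the given delegation token via the CANCELDELEGATIONTOKEN operation. */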
  private synchronized void cancelDelegationToken(final Token<?> token
      ) throws IOException {
    final HttpOpParam.Op op = PutOpParam.Op.CANCELDELEGATIONTOKEN;
    TokenArgumentParam dtargParam = new TokenArgumentParam(
        token.encodeToUrlString());
    run(op, null, dtargParam);
  }

  @Override
  public BlockLocation[] getFileBlockLocations(final FileStatus status,
      final long offset, final long length) throws IOException {
    if (status == null) {
      return null;
    }
    return getFileBlockLocations(status.getPath(), offset, length);
  }

  @Override
  public BlockLocation[] getFileBlockLocations(final Path p,
      final long offset, final long length) throws IOException {
    statistics.incrementReadOps(1);

    final HttpOpParam.Op op = GetOpParam.Op.GET_BLOCK_LOCATIONS;
    final Map<?, ?> m = run(op, p, new OffsetParam(offset),
        new LengthParam(length));
    return DFSUtil.locatedBlocks2Locations(JsonUtil.toLocatedBlocks(m));
  }

  @Override
  public ContentSummary getContentSummary(final Path p) throws IOException {
    statistics.incrementReadOps(1);

    final HttpOpParam.Op op = GetOpParam.Op.GETCONTENTSUMMARY;
    final Map<?, ?> m = run(op, p);
    return JsonUtil.toContentSummary(m);
  }

  @Override
  public MD5MD5CRC32FileChecksum getFileChecksum(final Path p
      ) throws IOException {
    statistics.incrementReadOps(1);

    final HttpOpParam.Op op = GetOpParam.Op.GETFILECHECKSUM;
    final Map<?, ?> m = run(op, p);
    return JsonUtil.toMD5MD5CRC32FileChecksum(m);
  }

  /** Delegation token renewer. */
  public static class DtRenewer extends TokenRenewer {
    @Override
    public boolean handleKind(Text kind) {
      return kind.equals(TOKEN_KIND);
    }

    @Override
    public boolean isManaged(Token<?> token) throws IOException {
      return true;
    }

    private static WebHdfsFileSystem getWebHdfs(
        final Token<?> token, final Configuration conf
        ) throws IOException, InterruptedException, URISyntaxException {

      final InetSocketAddress nnAddr = SecurityUtil.getTokenServiceAddr(token);
      final URI uri = DFSUtil.createUri(WebHdfsFileSystem.SCHEME, nnAddr);
      return (WebHdfsFileSystem)FileSystem.get(uri, conf);
    }

    @Override
    public long renew(final Token<?> token, final Configuration conf
        ) throws IOException, InterruptedException {
      final UserGroupInformation ugi = UserGroupInformation.getLoginUser();
      // update the kerberos credentials, if they are coming from a keytab
      ugi.reloginFromKeytab();

      try {
        WebHdfsFileSystem webhdfs = getWebHdfs(token, conf);
        return webhdfs.renewDelegationToken(token);
      } catch (URISyntaxException e) {
        throw new IOException(e);
      }
    }

    @Override
    public void cancel(final Token<?> token, final Configuration conf
        ) throws IOException, InterruptedException {
      final UserGroupInformation ugi = UserGroupInformation.getLoginUser();
      // update the kerberos credentials, if they are coming from a keytab
      ugi.checkTGTAndReloginFromKeytab();

      try {
        final WebHdfsFileSystem webhdfs = getWebHdfs(token, conf);
        webhdfs.cancelDelegationToken(token);
      } catch (URISyntaxException e) {
        throw new IOException(e);
      }
    }
  }
}