/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hdfs.client;

import java.io.FileNotFoundException;
import java.io.IOException;
import java.net.URI;
import java.util.EnumSet;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CacheFlag;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.hdfs.DFSInotifyEventInputStream;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
import org.apache.hadoop.hdfs.protocol.CachePoolEntry;
import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
import org.apache.hadoop.hdfs.protocol.EncryptionZone;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.tools.DFSAdmin;
import org.apache.hadoop.security.AccessControlException;

/**
 * The public API for performing administrative functions on HDFS. Those
 * writing applications against HDFS should prefer this interface to directly
 * accessing functionality in DistributedFileSystem or DFSClient.
 *
 * Note that this is distinct from the similarly-named {@link DFSAdmin}, which
 * is a class that provides the functionality for the CLI
 * <code>hdfs dfsadmin ...</code> commands.
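 * <p/>
 * A minimal usage sketch; the namenode URI and path are illustrative:
 * <pre>{@code
 * Configuration conf = new Configuration();
 * HdfsAdmin admin = new HdfsAdmin(URI.create("hdfs://namenode:8020"), conf);
 * admin.setQuota(new Path("/user/alice"), 100000);
 * }</pre>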
 */
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class HdfsAdmin {

  private DistributedFileSystem dfs;

  /**
   * Create a new HdfsAdmin client.
   *
   * @param uri the unique URI of the HDFS file system to administer
   * @param conf configuration
   * @throws IOException in the event the file system could not be created
   */
  public HdfsAdmin(URI uri, Configuration conf) throws IOException {
    FileSystem fs = FileSystem.get(uri, conf);
    if (!(fs instanceof DistributedFileSystem)) {
      throw new IllegalArgumentException("'" + uri + "' is not an HDFS URI.");
    }
    dfs = (DistributedFileSystem) fs;
  }

  /**
   * Set the namespace quota (count of files, directories, and symlinks) for a
   * directory.
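   * <p/>
   * For example, to allow at most 100,000 names under a directory (the path
   * and limit are illustrative):
   * <pre>{@code
   * admin.setQuota(new Path("/user/alice"), 100000);
   * }</pre>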
   *
   * @param src the path to set the quota for
   * @param quota the value to set for the quota
   * @throws IOException in the event of error
   */
  public void setQuota(Path src, long quota) throws IOException {
    dfs.setQuota(src, quota, HdfsConstants.QUOTA_DONT_SET);
  }

  /**
   * Clear the namespace quota (count of files, directories, and symlinks) for
   * a directory.
   *
   * @param src the path to clear the quota of
   * @throws IOException in the event of error
   */
  public void clearQuota(Path src) throws IOException {
    dfs.setQuota(src, HdfsConstants.QUOTA_RESET, HdfsConstants.QUOTA_DONT_SET);
  }

  /**
   * Set the disk space quota (size of files) for a directory. Note that
   * directories and symlinks do not occupy disk space.
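   * <p/>
   * The space quota counts raw disk usage, including replication. For
   * example, to cap a directory at 10 GB (the path and limit are
   * illustrative):
   * <pre>{@code
   * admin.setSpaceQuota(new Path("/user/alice"), 10L * 1024 * 1024 * 1024);
   * }</pre>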
   *
   * @param src the path to set the space quota of
   * @param spaceQuota the value to set for the space quota
   * @throws IOException in the event of error
   */
  public void setSpaceQuota(Path src, long spaceQuota) throws IOException {
    dfs.setQuota(src, HdfsConstants.QUOTA_DONT_SET, spaceQuota);
  }

  /**
   * Clear the disk space quota (size of files) for a directory. Note that
   * directories and symlinks do not occupy disk space.
   *
   * @param src the path to clear the space quota of
   * @throws IOException in the event of error
   */
  public void clearSpaceQuota(Path src) throws IOException {
    dfs.setQuota(src, HdfsConstants.QUOTA_DONT_SET, HdfsConstants.QUOTA_RESET);
  }

  /**
   * Allow snapshots to be taken on a directory.
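   * <p/>
   * A sketch (the path is illustrative, and {@code fs} is assumed to be a
   * {@link FileSystem} handle for the same cluster; snapshots themselves are
   * created through {@link FileSystem#createSnapshot}):
   * <pre>{@code
   * admin.allowSnapshot(new Path("/data"));
   * fs.createSnapshot(new Path("/data"), "s1");
   * }</pre>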
   * @param path The path of the directory where snapshots will be taken.
   * @throws IOException in the event of error
   */
  public void allowSnapshot(Path path) throws IOException {
    dfs.allowSnapshot(path);
  }

  /**
   * Disallow snapshots on a directory.
   * @param path The path of the snapshottable directory.
   * @throws IOException in the event of error
   */
  public void disallowSnapshot(Path path) throws IOException {
    dfs.disallowSnapshot(path);
  }

  /**
   * Add a new CacheDirectiveInfo.
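   * <p/>
   * A sketch using {@link CacheDirectiveInfo.Builder}; the path and pool name
   * are illustrative, and the pool must already exist:
   * <pre>{@code
   * long id = admin.addCacheDirective(
   *     new CacheDirectiveInfo.Builder()
   *         .setPath(new Path("/hot/data"))
   *         .setPool("hotPool")
   *         .setReplication((short) 2)
   *         .build(),
   *     EnumSet.noneOf(CacheFlag.class));
   * }</pre>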
   *
   * @param info Information about a directive to add.
   * @param flags {@link CacheFlag}s to use for this operation.
   * @return the ID of the directive that was created.
   * @throws IOException if the directive could not be added
   */
  public long addCacheDirective(CacheDirectiveInfo info,
      EnumSet<CacheFlag> flags) throws IOException {
    return dfs.addCacheDirective(info, flags);
  }

  /**
   * Modify a CacheDirective.
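   * <p/>
   * For example, to change the cache replication of an existing directive
   * (the ID and replication value are illustrative):
   * <pre>{@code
   * admin.modifyCacheDirective(
   *     new CacheDirectiveInfo.Builder()
   *         .setId(id)
   *         .setReplication((short) 3)
   *         .build(),
   *     EnumSet.noneOf(CacheFlag.class));
   * }</pre>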
   *
   * @param info Information about the directive to modify. You must set the ID
   *          to indicate which CacheDirective you want to modify.
   * @param flags {@link CacheFlag}s to use for this operation.
   * @throws IOException if the directive could not be modified
   */
  public void modifyCacheDirective(CacheDirectiveInfo info,
      EnumSet<CacheFlag> flags) throws IOException {
    dfs.modifyCacheDirective(info, flags);
  }

  /**
   * Remove a CacheDirective.
   *
   * @param id identifier of the CacheDirectiveInfo to remove
   * @throws IOException if the directive could not be removed
   */
  public void removeCacheDirective(long id) throws IOException {
    dfs.removeCacheDirective(id);
  }

  /**
   * List cache directives. Incrementally fetches results from the server.
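   * <p/>
   * For example, to print every directive visible to the caller:
   * <pre>{@code
   * RemoteIterator<CacheDirectiveEntry> it = admin.listCacheDirectives(null);
   * while (it.hasNext()) {
   *   System.out.println(it.next().getInfo());
   * }
   * }</pre>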
   *
   * @param filter Filter parameters to use when listing the directives, null to
   *               list all directives visible to the caller.
   * @return A RemoteIterator which returns CacheDirectiveEntry objects.
   * @throws IOException if the directives could not be listed
   */
  public RemoteIterator<CacheDirectiveEntry> listCacheDirectives(
      CacheDirectiveInfo filter) throws IOException {
    return dfs.listCacheDirectives(filter);
  }

  /**
   * Add a cache pool.
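   * <p/>
   * For example (the pool name and byte limit are illustrative):
   * <pre>{@code
   * admin.addCachePool(new CachePoolInfo("hotPool")
   *     .setLimit(64L * 1024 * 1024));
   * }</pre>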
   *
   * @param info
   *          The request to add a cache pool.
   * @throws IOException
   *          If the request could not be completed.
   */
  public void addCachePool(CachePoolInfo info) throws IOException {
    dfs.addCachePool(info);
  }

  /**
   * Modify an existing cache pool.
   *
   * @param info
   *          The request to modify a cache pool.
   * @throws IOException
   *          If the request could not be completed.
   */
  public void modifyCachePool(CachePoolInfo info) throws IOException {
    dfs.modifyCachePool(info);
  }

  /**
   * Remove a cache pool.
   *
   * @param poolName
   *          Name of the cache pool to remove.
   * @throws IOException
   *          If the cache pool did not exist, or could not be removed.
   */
  public void removeCachePool(String poolName) throws IOException {
    dfs.removeCachePool(poolName);
  }

  /**
   * List all cache pools.
   *
   * @return A remote iterator from which you can get CachePoolEntry objects.
   *          Requests will be made as needed.
   * @throws IOException
   *          If there was an error listing cache pools.
   */
  public RemoteIterator<CachePoolEntry> listCachePools() throws IOException {
    return dfs.listCachePools();
  }

  /**
   * Create an encryption zone rooted at an empty existing directory, using the
   * specified encryption key. An encryption zone has an associated encryption
   * key used when reading and writing files within the zone.
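   * <p/>
   * A sketch; the directory and key name are illustrative, and the key must
   * already exist in the cluster's configured KeyProvider:
   * <pre>{@code
   * admin.createEncryptionZone(new Path("/secure"), "myKey");
   * }</pre>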
   *
   * @param path    The path of the root of the encryption zone. Must refer to
   *                an empty, existing directory.
   * @param keyName Name of key available at the KeyProvider.
   * @throws IOException            if there was a general IO exception
   * @throws AccessControlException if the caller does not have access to path
   * @throws FileNotFoundException  if the path does not exist
   */
  public void createEncryptionZone(Path path, String keyName)
    throws IOException, AccessControlException, FileNotFoundException {
    dfs.createEncryptionZone(path, keyName);
  }

  /**
   * Get the encryption zone for a given file or directory.
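   * <p/>
   * For example (the path is illustrative):
   * <pre>{@code
   * EncryptionZone zone = admin.getEncryptionZoneForPath(new Path("/secure/file"));
   * if (zone != null) {
   *   System.out.println(zone.getPath() + " uses key " + zone.getKeyName());
   * }
   * }</pre>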
   *
   * @param path The path to get the encryption zone for.
   *
   * @return The EncryptionZone containing the path, or null if the path is not
   *         in an encryption zone.
   * @throws IOException            if there was a general IO exception
   * @throws AccessControlException if the caller does not have access to path
   * @throws FileNotFoundException  if the path does not exist
   */
  public EncryptionZone getEncryptionZoneForPath(Path path)
    throws IOException, AccessControlException, FileNotFoundException {
    return dfs.getEZForPath(path);
  }

  /**
   * Returns a RemoteIterator which can be used to list the encryption zones
   * in HDFS. For large numbers of encryption zones, the iterator will fetch
   * the list of zones in a number of small batches.
   * <p/>
   * Since the list is fetched in batches, it does not represent a
   * consistent snapshot of the entire list of encryption zones.
   * <p/>
   * This method can only be called by HDFS superusers.
   */
  public RemoteIterator<EncryptionZone> listEncryptionZones()
      throws IOException {
    return dfs.listEncryptionZones();
  }

  /**
   * Exposes a stream of namesystem events. Only events occurring after the
   * stream is created are available.
   * See {@link org.apache.hadoop.hdfs.DFSInotifyEventInputStream}
   * for information on stream usage.
   * See {@link org.apache.hadoop.hdfs.inotify.Event}
   * for information on the available events.
   * <p/>
   * Inotify users may want to tune the following HDFS parameters to
   * ensure that enough extra HDFS edits are saved to support inotify clients
   * that fall behind the current state of the namespace while reading events.
   * The default parameter values should generally be reasonable. If edits are
   * deleted before their corresponding events can be read, clients will see a
   * {@link org.apache.hadoop.hdfs.inotify.MissingEventsException} on
   * {@link org.apache.hadoop.hdfs.DFSInotifyEventInputStream} method calls.
   * <p/>
   * It should generally be sufficient to tune these parameters:
   * dfs.namenode.num.extra.edits.retained
   * dfs.namenode.max.extra.edits.segments.retained
   * <p/>
   * Parameters that affect the number of created segments and the number of
   * edits that are considered necessary (i.e., that do not count towards the
   * dfs.namenode.num.extra.edits.retained quota):
   * dfs.namenode.checkpoint.period
   * dfs.namenode.checkpoint.txns
   * dfs.namenode.num.checkpoints.retained
   * dfs.ha.log-roll.period
   * <p/>
   * It is recommended that local journaling be configured
   * (dfs.namenode.edits.dir) for inotify (in addition to a shared journal)
   * so that edit transfers from the shared journal can be avoided.
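   * <p/>
   * A minimal polling sketch; this assumes, as in this version of the API,
   * that {@code take()} returns a single
   * {@link org.apache.hadoop.hdfs.inotify.Event} (exception handling elided):
   * <pre>{@code
   * DFSInotifyEventInputStream stream = admin.getInotifyEventStream();
   * while (true) {
   *   Event event = stream.take(); // blocks until an event is available
   *   System.out.println(event.getEventType());
   * }
   * }</pre>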
   *
   * @throws IOException If there was an error obtaining the stream.
   */
  public DFSInotifyEventInputStream getInotifyEventStream() throws IOException {
    return dfs.getInotifyEventStream();
  }

  /**
   * A version of {@link HdfsAdmin#getInotifyEventStream()} meant for advanced
   * users who are aware of HDFS edits up to lastReadTxid (e.g. because they
   * have access to an FSImage inclusive of lastReadTxid) and only want to read
   * events after this point.
   *
   * @param lastReadTxid the transaction ID of the last edit the caller has
   *                     already read; only later events are returned
   * @throws IOException If there was an error obtaining the stream.
   */
  public DFSInotifyEventInputStream getInotifyEventStream(long lastReadTxid)
      throws IOException {
    return dfs.getInotifyEventStream(lastReadTxid);
  }

  /**
   * Set the storage policy for a given file or directory.
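   * <p/>
   * For example, to move a directory's data to archival storage; the path is
   * illustrative and the policy name must match one defined on the cluster:
   * <pre>{@code
   * admin.setStoragePolicy(new Path("/archive"), "COLD");
   * }</pre>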
   *
   * @param src The source path referring to either a directory or a file.
   * @param policyName The name of the storage policy.
   * @throws IOException in the event of error
   */
  public void setStoragePolicy(final Path src, final String policyName)
      throws IOException {
    dfs.setStoragePolicy(src, policyName);
  }
}