java.lang.Object
  org.apache.hadoop.hdfs.server.namenode.INode
    org.apache.hadoop.hdfs.server.namenode.INodeWithAdditionalFields
      org.apache.hadoop.hdfs.server.namenode.INodeDirectory
public class INodeDirectory
extends INodeWithAdditionalFields
implements INodeDirectoryAttributes

Directory INode class.
Nested Class Summary

protected static class INodeDirectory.SnapshotAndINode
    A pair of Snapshot and INode objects.

Nested classes/interfaces inherited from class org.apache.hadoop.hdfs.server.namenode.INode:
    INode.BlocksMapUpdateInfo

Nested classes/interfaces inherited from interface org.apache.hadoop.hdfs.server.namenode.INodeDirectoryAttributes:
    INodeDirectoryAttributes.CopyWithQuota, INodeDirectoryAttributes.SnapshotCopy
Field Summary

protected static int DEFAULT_FILES_PER_DIRECTORY

Fields inherited from class org.apache.hadoop.hdfs.server.namenode.INode:
    LOG
Constructor Summary

INodeDirectory(INodeDirectory other, boolean adopt)
    Copy constructor.

INodeDirectory(long id, byte[] name, org.apache.hadoop.fs.permission.PermissionStatus permissions, long mtime)
    Constructor.
Method Summary

boolean addChild(org.apache.hadoop.hdfs.server.namenode.INode node)
    The same as addChild(node, false, null, false).

boolean addChild(org.apache.hadoop.hdfs.server.namenode.INode node, boolean setModTime, org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot latest, INodeMap inodeMap)
    Add a child inode to the directory.

INodeDirectory asDirectory()
    Cast this inode to an INodeDirectory.

Quota.Counts cleanSubtree(org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot snapshot, org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot prior, INode.BlocksMapUpdateInfo collectedBlocks, List<org.apache.hadoop.hdfs.server.namenode.INode> removedINodes, boolean countDiffChange)
    Clean the subtree under this inode and collect the blocks from the descendants for further block deletion/update.

Quota.Counts cleanSubtreeRecursively(org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot snapshot, org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot prior, INode.BlocksMapUpdateInfo collectedBlocks, List<org.apache.hadoop.hdfs.server.namenode.INode> removedINodes, Map<org.apache.hadoop.hdfs.server.namenode.INode,org.apache.hadoop.hdfs.server.namenode.INode> excludedNodes, boolean countDiffChange)
    Call cleanSubtree(..) recursively down the subtree.

void clear()
    Clear references to other objects.

void clearChildren()
    Set the children list to null.

Content.Counts computeContentSummary(Content.Counts counts)
    Count subtree content summary with a Content.Counts.

Quota.Counts computeQuotaUsage(Quota.Counts counts, boolean useCache, int lastSnapshotId)
    Count subtree Quota.NAMESPACE and Quota.DISKSPACE usages.

Quota.Counts computeQuotaUsage4CurrentDirectory(Quota.Counts counts)
    Add quota usage for this inode excluding children.

void destroyAndCollectBlocks(INode.BlocksMapUpdateInfo collectedBlocks, List<org.apache.hadoop.hdfs.server.namenode.INode> removedINodes)
    Destroy self and clear everything! If the INode is a file, this method collects its blocks for further block deletion.

protected static void dumpTreeRecursively(PrintWriter out, StringBuilder prefix, Iterable<INodeDirectory.SnapshotAndINode> subs)
    Dump the given subtrees.

void dumpTreeRecursively(PrintWriter out, StringBuilder prefix, org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot snapshot)
    Dump tree recursively.

org.apache.hadoop.hdfs.server.namenode.INode getChild(byte[] name, org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot snapshot)

org.apache.hadoop.hdfs.util.ReadOnlyList<org.apache.hadoop.hdfs.server.namenode.INode> getChildrenList(org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot snapshot)

int getChildrenNum(org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot snapshot)

boolean isDirectory()
    Check whether it's a directory.

boolean isSnapshottable()
    Is this a snapshottable directory?

boolean metadataEquals(org.apache.hadoop.hdfs.server.namenode.INodeDirectoryAttributes other)
    Compare the metadata with another INodeDirectory.

INodeDirectory recordModification(org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot latest, INodeMap inodeMap)
    This inode is being modified.

protected boolean removeChild(org.apache.hadoop.hdfs.server.namenode.INode child)
    Remove the specified child from this directory.

boolean removeChild(org.apache.hadoop.hdfs.server.namenode.INode child, org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot latest, INodeMap inodeMap)
    Remove the specified child from this directory.

void replaceChild(org.apache.hadoop.hdfs.server.namenode.INode oldChild, org.apache.hadoop.hdfs.server.namenode.INode newChild, INodeMap inodeMap)
    Replace the given child with a new child.

INodeDirectory replaceSelf4INodeDirectory(INodeMap inodeMap)
    Replace itself with an INodeDirectory.

org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectorySnapshottable replaceSelf4INodeDirectorySnapshottable(org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot latest, INodeMap inodeMap)
    Replace itself with an INodeDirectorySnapshottable.

INodeDirectoryWithSnapshot replaceSelf4INodeDirectoryWithSnapshot(INodeMap inodeMap)
    Replace itself with an INodeDirectoryWithSnapshot.

org.apache.hadoop.hdfs.server.namenode.INode saveChild2Snapshot(org.apache.hadoop.hdfs.server.namenode.INode child, org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot latest, org.apache.hadoop.hdfs.server.namenode.INode snapshotCopy, INodeMap inodeMap)
    Save the child to the latest snapshot.

static INodeDirectory valueOf(org.apache.hadoop.hdfs.server.namenode.INode inode, Object path)
    Cast INode to INodeDirectory.
Methods inherited from class org.apache.hadoop.hdfs.server.namenode.INodeWithAdditionalFields:
    getFsPermissionShort, getId, getLocalNameBytes, getNext, getPermissionLong, setAccessTime, setLocalName, setModificationTime, setNext, updateModificationTime

Methods inherited from class org.apache.hadoop.hdfs.server.namenode.INode:
    addSpaceConsumed, asFile, asReference, asSymlink, compareTo, computeContentSummary, computeQuotaUsage, computeQuotaUsage, dumpTreeRecursively, dumpTreeRecursively, equals, getAccessTime, getDsQuota, getFsPermission, getFullPathName, getGroupName, getKey, getLocalName, getModificationTime, getNsQuota, getObjectString, getParent, getParentReference, getParentString, getSnapshotINode, getUserName, hashCode, isAncestorDirectory, isFile, isInLatestSnapshot, isQuotaSet, isReference, isSymlink, setAccessTime, setModificationTime, setParent, setParentReference, shouldRecordInSrcSnapshot, toDetailString, toString

Methods inherited from class java.lang.Object:
    clone, finalize, getClass, notify, notifyAll, wait, wait, wait

Methods inherited from interface org.apache.hadoop.hdfs.server.namenode.INodeDirectoryAttributes:
    getDsQuota, getNsQuota

Methods inherited from interface org.apache.hadoop.hdfs.server.namenode.INodeAttributes:
    getAccessTime, getFsPermission, getFsPermissionShort, getGroupName, getLocalNameBytes, getModificationTime, getPermissionLong, getUserName
Field Detail

protected static final int DEFAULT_FILES_PER_DIRECTORY
Constructor Detail

public INodeDirectory(long id, byte[] name, org.apache.hadoop.fs.permission.PermissionStatus permissions, long mtime)
    Constructor.

public INodeDirectory(INodeDirectory other, boolean adopt)
    Copy constructor.
    Parameters:
        other - the INodeDirectory to be copied
        adopt - indicates whether or not the parent field of the child INodes should be set to the new node
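As a rough illustration of the (long, byte[], PermissionStatus, long) constructor, the sketch below builds a standalone directory inode; the inode id, name, owner, group, and permission bits are made-up example values, not NameNode defaults.

```java
import java.nio.charset.StandardCharsets;

import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;

public class NewDirectoryInodeSketch {
  public static void main(String[] args) {
    // Made-up owner/group and 0755 permission bits for the example.
    PermissionStatus perm =
        new PermissionStatus("hdfs", "supergroup", new FsPermission((short) 0755));

    long inodeId = 16386L;  // hypothetical inode id
    byte[] name = "docs".getBytes(StandardCharsets.UTF_8);
    long mtime = System.currentTimeMillis();

    INodeDirectory dir = new INodeDirectory(inodeId, name, perm, mtime);
    System.out.println(dir.getLocalName() + " isDirectory=" + dir.isDirectory());
  }
}
```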
Method Detail
public static INodeDirectory valueOf(org.apache.hadoop.hdfs.server.namenode.INode inode, Object path) throws FileNotFoundException, org.apache.hadoop.fs.PathIsNotDirectoryException
    Cast INode to INodeDirectory.
    Throws:
        FileNotFoundException
        org.apache.hadoop.fs.PathIsNotDirectoryException
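A minimal sketch of how valueOf is typically used to downcast an INode: it throws instead of returning null when the inode cannot be used as a directory. The helper method and its name are illustrative, not part of the class.

```java
import java.io.FileNotFoundException;

import org.apache.hadoop.fs.PathIsNotDirectoryException;
import org.apache.hadoop.hdfs.server.namenode.INode;
import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;

final class DirectoryCastSketch {
  /** Downcast an inode resolved for the given path, or fail with the documented exceptions. */
  static INodeDirectory toDirectory(INode inode, String path)
      throws FileNotFoundException, PathIsNotDirectoryException {
    // valueOf rejects inodes that cannot be treated as directories by throwing
    // one of the two checked exceptions declared above.
    return INodeDirectory.valueOf(inode, path);
  }
}
```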
public final boolean isDirectory()
    Check whether it's a directory.
    Overrides:
        isDirectory in class org.apache.hadoop.hdfs.server.namenode.INode
public final INodeDirectory asDirectory()
    Cast this inode to an INodeDirectory.
    Overrides:
        asDirectory in class org.apache.hadoop.hdfs.server.namenode.INode
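Since asDirectory() is only meaningful for directory inodes, the usual pattern guards it with isDirectory(). A small sketch; the helper name and the null-snapshot child count are illustrative choices.

```java
import org.apache.hadoop.hdfs.server.namenode.INode;
import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;

final class CheckAndCastSketch {
  /** Count the children of an inode if it is a directory, otherwise return 0. */
  static int childCountOrZero(INode inode) {
    if (inode.isDirectory()) {
      INodeDirectory dir = inode.asDirectory();
      // A null snapshot asks for the current tree rather than a snapshot view.
      return dir.getChildrenNum(null);
    }
    return 0;
  }
}
```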
public boolean isSnapshottable()
    Is this a snapshottable directory?
public boolean removeChild(org.apache.hadoop.hdfs.server.namenode.INode child, org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot latest, INodeMap inodeMap) throws org.apache.hadoop.hdfs.protocol.QuotaExceededException
    Remove the specified child from this directory.
    Parameters:
        child - the child inode to be removed
        latest - see recordModification(Snapshot, INodeMap).
    Throws:
        org.apache.hadoop.hdfs.protocol.QuotaExceededException

protected final boolean removeChild(org.apache.hadoop.hdfs.server.namenode.INode child)
    Remove the specified child from this directory.
    Parameters:
        child - the child inode to be removed
public org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectorySnapshottable replaceSelf4INodeDirectorySnapshottable(org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot latest, INodeMap inodeMap) throws org.apache.hadoop.hdfs.protocol.QuotaExceededException
    Replace itself with an INodeDirectorySnapshottable.
    Throws:
        org.apache.hadoop.hdfs.protocol.QuotaExceededException

public INodeDirectoryWithSnapshot replaceSelf4INodeDirectoryWithSnapshot(INodeMap inodeMap)
    Replace itself with an INodeDirectoryWithSnapshot.

public INodeDirectory replaceSelf4INodeDirectory(INodeMap inodeMap)
    Replace itself with an INodeDirectory.
public void replaceChild(org.apache.hadoop.hdfs.server.namenode.INode oldChild, org.apache.hadoop.hdfs.server.namenode.INode newChild, INodeMap inodeMap)
    Replace the given child with a new child.
public INodeDirectory recordModification(org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot latest, INodeMap inodeMap) throws org.apache.hadoop.hdfs.protocol.QuotaExceededException
    This inode is being modified.
    Overrides:
        recordModification in class org.apache.hadoop.hdfs.server.namenode.INode
    Parameters:
        latest - the latest snapshot that has been taken. Note that it is null if no snapshots have been taken.
        inodeMap - while recording the modification, the inode or its parent may get replaced, and the inodeMap needs to be updated.
    Throws:
        org.apache.hadoop.hdfs.protocol.QuotaExceededException
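Because the inode may get replaced while the modification is recorded, callers should continue with the returned directory rather than the original reference. A hedged sketch of that pattern; the helper name is ours, and its arguments are assumed to be supplied by surrounding NameNode code.

```java
import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
import org.apache.hadoop.hdfs.server.namenode.INodeMap;
import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;

final class RecordModificationSketch {
  /** Prepare a directory for an in-place change; continue with the returned inode. */
  static INodeDirectory beforeModifying(INodeDirectory dir, Snapshot latest, INodeMap inodeMap)
      throws QuotaExceededException {
    // latest is null when no snapshot has been taken (see the Parameters above);
    // the returned inode may be a replacement for the original directory.
    return dir.recordModification(latest, inodeMap);
  }
}
```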
public org.apache.hadoop.hdfs.server.namenode.INode saveChild2Snapshot(org.apache.hadoop.hdfs.server.namenode.INode child, org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot latest, org.apache.hadoop.hdfs.server.namenode.INode snapshotCopy, INodeMap inodeMap) throws org.apache.hadoop.hdfs.protocol.QuotaExceededException
    Save the child to the latest snapshot.
    Throws:
        org.apache.hadoop.hdfs.protocol.QuotaExceededException
public org.apache.hadoop.hdfs.server.namenode.INode getChild(byte[] name, org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot snapshot)
    Parameters:
        name - the name of the child
        snapshot - if it is not null, get the result from the given snapshot; otherwise, get the result from the current directory.
public boolean addChild(org.apache.hadoop.hdfs.server.namenode.INode node, boolean setModTime, org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot latest, INodeMap inodeMap) throws org.apache.hadoop.hdfs.protocol.QuotaExceededException
    Add a child inode to the directory.
    Parameters:
        node - the INode to insert
        setModTime - set the modification time for the parent node; not needed when replaying the addition and the parent already has the proper mod time
        inodeMap - update the inodeMap if the directory node gets replaced
    Throws:
        org.apache.hadoop.hdfs.protocol.QuotaExceededException

public boolean addChild(org.apache.hadoop.hdfs.server.namenode.INode node)
    The same as addChild(node, false, null, false).
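A short sketch combining the snapshot-aware addChild and removeChild variants documented above; the helper is illustrative and assumes the caller already holds the child inode, the latest snapshot (possibly null), and the inodeMap.

```java
import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
import org.apache.hadoop.hdfs.server.namenode.INode;
import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
import org.apache.hadoop.hdfs.server.namenode.INodeMap;
import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;

final class ChildUpdateSketch {
  /** Insert a child (stamping the parent's mtime), then remove it again. */
  static void addThenRemove(INodeDirectory parent, INode child,
      Snapshot latest, INodeMap inodeMap) throws QuotaExceededException {
    // setModTime = true updates the parent's modification time; it is not needed
    // when replaying an edit that already carries the proper mod time.
    boolean added = parent.addChild(child, true, latest, inodeMap);
    if (added) {
      parent.removeChild(child, latest, inodeMap);
    }
  }
}
```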
public Quota.Counts computeQuotaUsage(Quota.Counts counts, boolean useCache, int lastSnapshotId)
    Count subtree Quota.NAMESPACE and Quota.DISKSPACE usages.

    With the existence of INodeReference, the same inode and its subtree may be referred to by multiple INodeReference.WithName nodes and an INodeReference.DstReference node. To avoid cycles during quota usage computation, we have the following rules:
    1. For an INodeReference.DstReference node, since the node must be in the current tree (or has been deleted as the end point of a series of rename operations), we compute the quota usage of the referred node (and its subtree) in the regular manner, i.e., including every inode in the current tree and in snapshot copies, as well as the size of the diff list.
    2. For an INodeReference.WithName node, since the node must be in a snapshot, we only count the quota usage for those nodes that still existed at the creation time of the snapshot associated with the INodeReference.WithName node. We do not count the size of the diff list.

    Overrides:
        computeQuotaUsage in class org.apache.hadoop.hdfs.server.namenode.INode
    Parameters:
        counts - the subtree counts for returning.
        useCache - whether to use cached quota usage. Note that an INodeReference.WithName node never uses the cache for its subtree.
        lastSnapshotId - Snapshot.INVALID_ID indicates the computation is in the current tree. Otherwise the id indicates the computation range for an INodeReference.WithName node.
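A sketch of computing the subtree quota usage of a directory in the current tree. Quota.Counts.newInstance() is assumed to return a zeroed counter; useCache = true and Snapshot.INVALID_ID follow the parameter descriptions above.

```java
import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
import org.apache.hadoop.hdfs.server.namenode.Quota;
import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;

final class QuotaUsageSketch {
  /** Compute namespace and diskspace usage for the whole subtree in the current tree. */
  static Quota.Counts subtreeUsage(INodeDirectory dir) {
    Quota.Counts counts = Quota.Counts.newInstance();  // assumed zero-initialized counter
    // useCache = true reuses cached usage where available;
    // Snapshot.INVALID_ID means the computation is in the current tree.
    return dir.computeQuotaUsage(counts, true, Snapshot.INVALID_ID);
  }
}
```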
public Quota.Counts computeQuotaUsage4CurrentDirectory(Quota.Counts counts)
    Add quota usage for this inode excluding children.
public Content.Counts computeContentSummary(Content.Counts counts)
    Count subtree content summary with a Content.Counts.
    Overrides:
        computeContentSummary in class org.apache.hadoop.hdfs.server.namenode.INode
    Parameters:
        counts - the subtree counts for returning.
public org.apache.hadoop.hdfs.util.ReadOnlyList<org.apache.hadoop.hdfs.server.namenode.INode> getChildrenList(org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot snapshot)
    Parameters:
        snapshot - if it is not null, get the result from the given snapshot; otherwise, get the result from the current directory.
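A small sketch of reading the children view: passing a null snapshot returns the current children, while passing a Snapshot returns the list as recorded in that snapshot. It assumes org.apache.hadoop.hdfs.util.ReadOnlyList can be iterated with a for-each loop.

```java
import org.apache.hadoop.hdfs.server.namenode.INode;
import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
import org.apache.hadoop.hdfs.util.ReadOnlyList;

final class ListChildrenSketch {
  /** Print the local names of the children as seen in the current tree. */
  static void printChildren(INodeDirectory dir) {
    ReadOnlyList<INode> children = dir.getChildrenList(null);  // null = current directory
    for (INode child : children) {
      System.out.println(child.getLocalName());
    }
  }
}
```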
public void clearChildren()
    Set the children list to null.
public void clear()
    Clear references to other objects.
    Overrides:
        clear in class org.apache.hadoop.hdfs.server.namenode.INode
public Quota.Counts cleanSubtreeRecursively(org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot snapshot, org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot prior, INode.BlocksMapUpdateInfo collectedBlocks, List<org.apache.hadoop.hdfs.server.namenode.INode> removedINodes, Map<org.apache.hadoop.hdfs.server.namenode.INode,org.apache.hadoop.hdfs.server.namenode.INode> excludedNodes, boolean countDiffChange) throws org.apache.hadoop.hdfs.protocol.QuotaExceededException
    Call cleanSubtree(..) recursively down the subtree.
    Throws:
        org.apache.hadoop.hdfs.protocol.QuotaExceededException
public void destroyAndCollectBlocks(INode.BlocksMapUpdateInfo collectedBlocks, List<org.apache.hadoop.hdfs.server.namenode.INode> removedINodes)
    Destroy self and clear everything! If the INode is a file, this method collects its blocks for further block deletion.
    Overrides:
        destroyAndCollectBlocks in class org.apache.hadoop.hdfs.server.namenode.INode
    Parameters:
        collectedBlocks - blocks collected from the descendants for further block deletion/update will be added to this map.
        removedINodes - INodes collected from the descendants for further cleaning up of the inodeMap

public Quota.Counts cleanSubtree(org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot snapshot, org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot prior, INode.BlocksMapUpdateInfo collectedBlocks, List<org.apache.hadoop.hdfs.server.namenode.INode> removedINodes, boolean countDiffChange) throws org.apache.hadoop.hdfs.protocol.QuotaExceededException
    Clean the subtree under this inode and collect the blocks from the descendants for further block deletion/update.

    In general, we have the following rules:
    1. When deleting a file/directory in the current tree, the action depends on the type of the node to delete.
       1.1 The current inode (this) is an INodeFile.
           1.1.1 If prior is null, no snapshot has been taken on any ancestor before, so we simply destroy the current INode (i.e., delete it completely, with no need to save a snapshot copy) and collect its blocks for further cleansing.
           1.1.2 Else do nothing, since the current INode will be stored as a snapshot copy.
       1.2 The current inode is an INodeDirectory.
           1.2.1 If prior is null, no snapshot has been taken on any ancestor before. Similarly, we destroy the whole subtree and collect blocks.
           1.2.2 Else do nothing with the current INode; recursively clean its children.
       1.3 The current inode is a FileWithSnapshot. Call recordModification(..) to capture the current state, and mark the INode as deleted.
       1.4 The current inode is an INodeDirectoryWithSnapshot. Call recordModification(..) to capture the current state. Destroy files/directories created after the latest snapshot (i.e., the inodes stored in the created list of the latest snapshot), then recursively clean the remaining children.
    2. When deleting a snapshot:
       2.1 To clean an INodeFile: do nothing.
       2.2 To clean an INodeDirectory: recursively clean its children.
       2.3 To clean a FileWithSnapshot: delete the corresponding snapshot in its diff list.
       2.4 To clean an INodeDirectoryWithSnapshot: delete the corresponding snapshot in its diff list, then recursively clean its children.

    Overrides:
        cleanSubtree in class org.apache.hadoop.hdfs.server.namenode.INode
    Parameters:
        snapshot - the snapshot to delete. Null means to delete the current file/directory.
        prior - the latest snapshot before the to-be-deleted snapshot. When deleting a current inode, this parameter captures the latest snapshot.
        collectedBlocks - blocks collected from the descendants for further block deletion/update will be added to the given map.
        removedINodes - INodes collected from the descendants for further cleaning up of the inodeMap
    Throws:
        org.apache.hadoop.hdfs.protocol.QuotaExceededException
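A hedged sketch of rule 1 above (deleting the current directory rather than a snapshot): snapshot is null, prior is the latest ancestor snapshot or null, and the two collection arguments receive the cleanup work. The no-argument BlocksMapUpdateInfo constructor and countDiffChange = true are assumptions made for illustration.

```java
import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
import org.apache.hadoop.hdfs.server.namenode.INode;
import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
import org.apache.hadoop.hdfs.server.namenode.Quota;
import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;

final class DeleteSubtreeSketch {
  /** Delete the current directory (snapshot == null) and collect the cleanup work. */
  static Quota.Counts deleteCurrent(INodeDirectory dir, Snapshot prior)
      throws QuotaExceededException {
    INode.BlocksMapUpdateInfo collectedBlocks = new INode.BlocksMapUpdateInfo();
    List<INode> removedINodes = new ArrayList<INode>();

    Quota.Counts removed =
        dir.cleanSubtree(null, prior, collectedBlocks, removedINodes, true);

    // collectedBlocks now holds blocks for further deletion/update;
    // removedINodes holds inodes to drop from the inodeMap.
    return removed;
  }
}
```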
public boolean metadataEquals(org.apache.hadoop.hdfs.server.namenode.INodeDirectoryAttributes other)
    Compare the metadata with another INodeDirectory.
    Specified by:
        metadataEquals in interface org.apache.hadoop.hdfs.server.namenode.INodeDirectoryAttributes
public void dumpTreeRecursively(PrintWriter out, StringBuilder prefix, org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot snapshot)
    Dump tree recursively.
    Overrides:
        dumpTreeRecursively in class org.apache.hadoop.hdfs.server.namenode.INode
    Parameters:
        prefix - the prefix string that each line should print.

protected static void dumpTreeRecursively(PrintWriter out, StringBuilder prefix, Iterable<INodeDirectory.SnapshotAndINode> subs)
    Dump the given subtrees.
    Parameters:
        prefix - the prefix string that each line should print.
        subs - the subtrees.

public final int getChildrenNum(org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot snapshot)
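A sketch that dumps the current-tree view of a directory to standard output; the PrintWriter wrapping and the empty prefix are illustrative choices.

```java
import java.io.PrintWriter;

import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;

final class DumpTreeSketch {
  /** Dump the subtree under the directory, as seen in the current tree. */
  static void dump(INodeDirectory dir) {
    PrintWriter out = new PrintWriter(System.out, true);
    // A null snapshot dumps the current state; pass a Snapshot to dump that view instead.
    dir.dumpTreeRecursively(out, new StringBuilder(), null);
    out.flush();
  }
}
```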