Deprecated Methods |
org.apache.hadoop.hbase.regionserver.wal.WALEdit.add(KeyValue)
Use WALEdit.add(Cell) instead |
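A minimal migration sketch for the WALEdit entry above, assuming an HBase 1.x classpath; since KeyValue implements Cell, existing KeyValue values can be handed to the Cell overload unchanged (row/family/qualifier values here are placeholders):

    import org.apache.hadoop.hbase.Cell;
    import org.apache.hadoop.hbase.KeyValue;
    import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
    import org.apache.hadoop.hbase.util.Bytes;

    // KeyValue implements Cell, so the deprecated add(KeyValue) call can
    // simply be retargeted at add(Cell).
    WALEdit edit = new WALEdit();
    Cell cell = new KeyValue(Bytes.toBytes("row"), Bytes.toBytes("cf"),
        Bytes.toBytes("q"), Bytes.toBytes("value"));
    edit.add(cell); // resolves to WALEdit.add(Cell)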
org.apache.hadoop.hbase.regionserver.wal.HLog.appendNoSync(HRegionInfo, TableName, WALEdit, List, long, HTableDescriptor, AtomicLong, boolean, long, long)
|
org.apache.hadoop.hbase.regionserver.Store.compact(CompactionContext, CompactionThroughputController)
see compact(CompactionContext, CompactionThroughputController, User) |
org.apache.hadoop.hbase.mapreduce.CellCreator.create(byte[], int, int, byte[], int, int, byte[], int, int, long, byte[], int, int, String)
|
org.apache.hadoop.hbase.mapreduce.CopyTable.createSubmittableJob(Configuration, String[])
Use CopyTable.createSubmittableJob(String[]) instead |
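A sketch of the CopyTable migration, assuming the tool is driven through ToolRunner (argument values are placeholders; constructor details vary across 1.x releases):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.mapreduce.CopyTable;
    import org.apache.hadoop.util.ToolRunner;

    // The instance method builds its Job from the String[] args alone, so
    // no Configuration is passed to createSubmittableJob any more; the
    // Configuration travels through the Tool instead.
    Configuration conf = HBaseConfiguration.create();
    String[] args = { "--new.name=backupTable", "sourceTable" }; // placeholders
    int exit = ToolRunner.run(conf, new CopyTable(), args);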
org.apache.hadoop.hbase.security.visibility.DefaultVisibilityLabelServiceImpl.getAuths(byte[], boolean)
|
org.apache.hadoop.hbase.security.visibility.VisibilityLabelService.getAuths(byte[], boolean)
Use VisibilityLabelService.getUserAuths(byte[], boolean) instead |
org.apache.hadoop.hbase.regionserver.wal.WALEdit.getKeyValues()
Use WALEdit.getCells() instead |
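The getCells() migration is mechanical; a minimal sketch, assuming the CellUtil helpers from the same public API:

    import java.util.List;
    import org.apache.hadoop.hbase.Cell;
    import org.apache.hadoop.hbase.CellUtil;
    import org.apache.hadoop.hbase.regionserver.wal.WALEdit;

    // Old: for (KeyValue kv : edit.getKeyValues()) { ... kv.getRow() ... }
    // New: iterate Cells and use CellUtil instead of KeyValue accessors.
    static void scanRows(WALEdit edit) {
      for (Cell cell : edit.getCells()) {
        byte[] row = CellUtil.cloneRow(cell); // copies the row bytes out
      }
    }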
org.apache.hadoop.hbase.regionserver.HRegion.getRegionDir(Path, HRegionInfo)
|
org.apache.hadoop.hbase.regionserver.HRegion.getRegionDir(Path, String)
|
org.apache.hadoop.hbase.regionserver.StoreFile.Reader.getScanner(boolean, boolean)
|
org.apache.hadoop.hbase.regionserver.StoreFile.Reader.getScanner(boolean, boolean, boolean)
|
org.apache.hadoop.hbase.regionserver.HStore.getStoreHomedir(Path, HRegionInfo, byte[])
|
org.apache.hadoop.hbase.regionserver.HStore.getStoreHomedir(Path, String, byte[])
|
org.apache.hadoop.hbase.security.visibility.DefaultVisibilityLabelServiceImpl.havingSystemAuth(byte[])
|
org.apache.hadoop.hbase.security.visibility.VisibilityLabelService.havingSystemAuth(byte[])
Use VisibilityLabelService.havingSystemAuth(User) instead |
org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil.initCredentialsForCluster(Job, String)
Since 1.2.0, use TableMapReduceUtil.initCredentialsForCluster(Job, Configuration) instead. |
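Since 1.2.0 the peer cluster is described by a full Configuration rather than a quorum-address string; a sketch, assuming HBaseConfiguration.createClusterConf (the helper from roughly the same release that expands a cluster key into a Configuration):

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
    import org.apache.hadoop.mapreduce.Job;

    // Old: TableMapReduceUtil.initCredentialsForCluster(job, peerQuorumAddress);
    // New: expand the cluster key into a Configuration first.
    static void addPeerCredentials(Job job, String peerQuorumAddress) throws IOException {
      Configuration peerConf =
          HBaseConfiguration.createClusterConf(job.getConfiguration(), peerQuorumAddress);
      TableMapReduceUtil.initCredentialsForCluster(job, peerConf);
    }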
org.apache.hadoop.hbase.regionserver.HRegion.initialize()
use HRegion.createHRegion() or HRegion.openHRegion() |
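Callers of HRegion.initialize() switch to the static factories, which initialize internally; a sketch of the open path (internal API, so overloads differ slightly between releases):

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HRegionInfo;
    import org.apache.hadoop.hbase.HTableDescriptor;
    import org.apache.hadoop.hbase.regionserver.HRegion;
    import org.apache.hadoop.hbase.wal.WAL;

    // openHRegion runs the region's initialization itself, so initialize()
    // is never called by the caller.
    static HRegion open(Configuration conf, HRegionInfo info, HTableDescriptor htd, WAL wal)
        throws IOException {
      return HRegion.openHRegion(info, htd, wal, conf);
    }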
org.apache.hadoop.hbase.master.cleaner.BaseLogCleanerDelegate.isLogDeletable(FileStatus)
|
org.apache.hadoop.hbase.coprocessor.RegionObserver.postCompact(ObserverContext, Store, StoreFile)
Use RegionObserver.postCompact(ObserverContext, Store, StoreFile, CompactionRequest) instead |
org.apache.hadoop.hbase.coprocessor.RegionObserver.postCompactSelection(ObserverContext, Store, ImmutableList)
use RegionObserver.postCompactSelection(ObserverContext, Store, ImmutableList, CompactionRequest) instead. |
org.apache.hadoop.hbase.coprocessor.RegionObserver.postFlush(ObserverContext)
use RegionObserver.postFlush(ObserverContext, Store, StoreFile) instead. |
org.apache.hadoop.hbase.coprocessor.BaseRegionObserver.postGet(ObserverContext, Get, List)
|
org.apache.hadoop.hbase.coprocessor.RegionObserver.postGet(ObserverContext, Get, List)
|
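The Get hooks above (and the preGet entries further down this list) carry no replacement note here; within the same RegionObserver interface the Cell-based preGetOp/postGetOp variants supersede them. A sketch, assuming BaseRegionObserver as the base class:

    import java.io.IOException;
    import java.util.List;
    import org.apache.hadoop.hbase.Cell;
    import org.apache.hadoop.hbase.client.Get;
    import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
    import org.apache.hadoop.hbase.coprocessor.ObserverContext;
    import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;

    public class GetAuditObserver extends BaseRegionObserver {
      @Override
      public void preGetOp(ObserverContext<RegionCoprocessorEnvironment> c,
          Get get, List<Cell> results) throws IOException {
        // runs before the Get is serviced
      }

      @Override
      public void postGetOp(ObserverContext<RegionCoprocessorEnvironment> c,
          Get get, List<Cell> results) throws IOException {
        // runs after the Get; results holds the Cells about to be returned
      }
    }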
org.apache.hadoop.hbase.coprocessor.RegionObserver.postIncrementColumnValue(ObserverContext, byte[], byte[], byte[], long, boolean, long)
This hook is no longer called by the RegionServer |
org.apache.hadoop.hbase.regionserver.RegionServerServices.postOpenDeployTasks(HRegion, CatalogTracker)
Use RegionServerServices.postOpenDeployTasks(PostOpenDeployContext) instead |
org.apache.hadoop.hbase.coprocessor.RegionObserver.postSplit(ObserverContext, HRegion, HRegion)
Use postCompleteSplit() instead |
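postCompleteSplit receives only the context; a sketch of the replacement hook (same observer-subclass pattern and imports as the Get sketch above):

    @Override
    public void postCompleteSplit(ObserverContext<RegionCoprocessorEnvironment> ctx)
        throws IOException {
      // fires once the split has fully completed; the daughter regions are
      // no longer passed in as HRegion arguments
    }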
org.apache.hadoop.hbase.coprocessor.RegionObserver.preCompact(ObserverContext, Store, InternalScanner, ScanType)
use
RegionObserver.preCompact(ObserverContext, Store, InternalScanner,
ScanType, CompactionRequest) instead |
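A sketch of the preCompact replacement signature, assuming the CompactionRequest type from org.apache.hadoop.hbase.regionserver.compactions (observer subclass as above):

    @Override
    public InternalScanner preCompact(ObserverContext<RegionCoprocessorEnvironment> c,
        Store store, InternalScanner scanner, ScanType scanType,
        CompactionRequest request) throws IOException {
      // request identifies the files being compacted and whether the
      // compaction is major; return the scanner (possibly wrapped)
      return scanner;
    }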
org.apache.hadoop.hbase.coprocessor.RegionObserver.preCompactScannerOpen(ObserverContext, Store, List<? extends KeyValueScanner>, ScanType, long, InternalScanner)
Use RegionObserver.preCompactScannerOpen(ObserverContext, Store, List<? extends KeyValueScanner>, ScanType, long, InternalScanner, CompactionRequest) instead. |
org.apache.hadoop.hbase.coprocessor.RegionObserver.preCompactSelection(ObserverContext, Store, List)
Use RegionObserver.preCompactSelection(ObserverContext, Store, List, CompactionRequest) instead |
org.apache.hadoop.hbase.coprocessor.RegionObserver.preFlush(ObserverContext)
use RegionObserver.preFlush(ObserverContext, Store, InternalScanner) instead |
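A sketch of the per-store preFlush replacement (observer subclass as above):

    @Override
    public InternalScanner preFlush(ObserverContext<RegionCoprocessorEnvironment> c,
        Store store, InternalScanner scanner) throws IOException {
      // called once per store being flushed; returning the scanner
      // unchanged keeps the default flush behavior
      return scanner;
    }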
org.apache.hadoop.hbase.coprocessor.BaseRegionObserver.preGet(ObserverContext, Get, List)
|
org.apache.hadoop.hbase.coprocessor.RegionObserver.preGet(ObserverContext, Get, List)
|
org.apache.hadoop.hbase.coprocessor.RegionObserver.preIncrementColumnValue(ObserverContext, byte[], byte[], byte[], long, boolean)
This hook is no longer called by the RegionServer |
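Since neither incrementColumnValue hook is invoked any more, that logic belongs in the Increment hooks, which the RegionServer still calls; a sketch of preIncrement (observer subclass as above):

    import org.apache.hadoop.hbase.client.Increment;
    import org.apache.hadoop.hbase.client.Result;

    @Override
    public Result preIncrement(ObserverContext<RegionCoprocessorEnvironment> c,
        Increment increment) throws IOException {
      return null; // returning null lets the normal increment proceed
    }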
org.apache.hadoop.hbase.coprocessor.RegionObserver.preSplit(ObserverContext)
Use RegionObserver.preSplit(ObserverContext, byte[]) instead |
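A sketch of the split-point-aware replacement (observer subclass as above):

    @Override
    public void preSplit(ObserverContext<RegionCoprocessorEnvironment> c, byte[] splitRow)
        throws IOException {
      // splitRow is the proposed split point for the region
    }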
org.apache.hadoop.hbase.io.Reference.readFields(DataInput)
Writables are going away. Use the pb serialization methods instead. Remove in a release after 0.96 goes out. This is here only to migrate old Reference files written with Writables before 0.96. |
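New code reads Reference files through the pb-aware helper rather than readFields; a minimal sketch, assuming Reference.read(FileSystem, Path), which detects the on-disk format (including pre-0.96 Writable files):

    import java.io.IOException;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.io.Reference;

    // Reference.read() handles format detection, so callers never invoke
    // readFields(DataInput) directly.
    static Reference load(FileSystem fs, Path refFile) throws IOException {
      return Reference.read(fs, refFile);
    }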
org.apache.hadoop.hbase.regionserver.RegionServerServices.reportRegionStateTransition(RegionServerStatusProtos.RegionStateTransition.TransitionCode, HRegionInfo...)
|
org.apache.hadoop.hbase.regionserver.RegionServerServices.reportRegionStateTransition(RegionServerStatusProtos.RegionStateTransition.TransitionCode, long, HRegionInfo...)
|
org.apache.hadoop.hbase.regionserver.Store.requestCompaction(int, CompactionRequest)
see requestCompaction(int, CompactionRequest, User) |
org.apache.hadoop.hbase.regionserver.RegionSplitPolicy.skipStoreFileRangeCheck()
Use RegionSplitPolicy.skipStoreFileRangeCheck(String) instead |
org.apache.hadoop.hbase.regionserver.RegionMergeTransaction.stepsAfterPONR(Server, RegionServerServices, HRegion)
|
org.apache.hadoop.hbase.regionserver.SplitTransaction.stepsAfterPONR(Server, RegionServerServices, PairOfSameType)
|
org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles.tryAtomicRegionLoad(HConnection, byte[], byte[], Collection)
Use LoadIncrementalHFiles.tryAtomicRegionLoad(HConnection, TableName, byte[], Collection) |
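The bulk-load migration swaps the raw table bytes for a TableName; a sketch from inside a LoadIncrementalHFiles subclass (the method is protected in 1.x; conn, first, and lqis are placeholder state):

    import org.apache.hadoop.hbase.TableName;

    // Old: tryAtomicRegionLoad(conn, Bytes.toBytes("t1"), first, lqis);
    // New: identify the table with a TableName instead of raw bytes.
    tryAtomicRegionLoad(conn, TableName.valueOf("t1"), first, lqis);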
org.apache.hadoop.hbase.regionserver.wal.HLogKey.write(DataOutput)
|