org.apache.hadoop.hbase.client.HTableWrapper.batch(List<? extends Row>)
org.apache.hadoop.hbase.client.HTableWrapper.batchCallback(List<? extends Row>, Batch.Callback)
org.apache.hadoop.hbase.mapreduce.HFileOutputFormat2.configureIncrementalLoad(Job, HTable)
org.apache.hadoop.hbase.mapreduce.CellCreator.create(byte[], int, int, byte[], int, int, byte[], int, int, long, byte[], int, int, String)
org.apache.hadoop.hbase.regionserver.SimpleRpcSchedulerFactory.create(Configuration, PriorityFunction)
org.apache.hadoop.hbase.regionserver.RpcSchedulerFactory.create(Configuration, PriorityFunction)
org.apache.hadoop.hbase.client.HTableWrapper.exists(List)
org.apache.hadoop.hbase.security.visibility.VisibilityLabelService.getAuths(byte[], boolean)
org.apache.hadoop.hbase.security.visibility.DefaultVisibilityLabelServiceImpl.getAuths(byte[], boolean)
org.apache.hadoop.hbase.TagRewriteCell.getFamily()
org.apache.hadoop.hbase.mapred.TableInputFormatBase.getHTable()
org.apache.hadoop.hbase.mapreduce.TableInputFormatBase.getHTable()
org.apache.hadoop.hbase.TagRewriteCell.getMvccVersion()
org.apache.hadoop.hbase.http.InfoServer.getPort()
org.apache.hadoop.hbase.http.HttpServer.getPort()
org.apache.hadoop.hbase.TagRewriteCell.getQualifier()
org.apache.hadoop.hbase.regionserver.HRegion.getRegionDir(Path, HRegionInfo)
org.apache.hadoop.hbase.regionserver.HRegion.getRegionDir(Path, String)
org.apache.hadoop.hbase.TagRewriteCell.getRow()
org.apache.hadoop.hbase.client.HTableWrapper.getRowOrBefore(byte[], byte[])
org.apache.hadoop.hbase.regionserver.StoreFile.Reader.getScanner(boolean, boolean)
org.apache.hadoop.hbase.regionserver.StoreFile.Reader.getScanner(boolean, boolean, boolean)
org.apache.hadoop.hbase.regionserver.HStore.getStoreHomedir(Path, HRegionInfo, byte[])
org.apache.hadoop.hbase.regionserver.HStore.getStoreHomedir(Path, String, byte[])
org.apache.hadoop.hbase.TagRewriteCell.getValue()
org.apache.hadoop.hbase.security.visibility.VisibilityLabelService.havingSystemAuth(byte[])
org.apache.hadoop.hbase.security.visibility.DefaultVisibilityLabelServiceImpl.havingSystemAuth(byte[])
org.apache.hadoop.hbase.regionserver.HRegion.initialize()
  Use HRegion.createHRegion() or HRegion.openHRegion() instead.
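
HRegion.createHRegion() and HRegion.openHRegion() perform the initialization step internally, so callers no longer construct a region and invoke initialize() themselves. A minimal sketch of both replacements, assuming the branch-1 overloads createHRegion(HRegionInfo, Path, Configuration, HTableDescriptor) and openHRegion(Configuration, FileSystem, Path, HRegionInfo, HTableDescriptor, WAL); the table name, column family, and root directory are hypothetical:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HColumnDescriptor;
    import org.apache.hadoop.hbase.HRegionInfo;
    import org.apache.hadoop.hbase.HTableDescriptor;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.regionserver.HRegion;

    public class RegionLifecycleSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        FileSystem fs = FileSystem.get(conf);
        Path rootDir = new Path("/hbase");  // hypothetical root directory

        HTableDescriptor htd = new HTableDescriptor(TableName.valueOf("t1"));
        htd.addFamily(new HColumnDescriptor("f"));
        HRegionInfo hri = new HRegionInfo(htd.getTableName());

        // Create a brand-new region; the factory method initializes it internally.
        HRegion created = HRegion.createHRegion(hri, rootDir, conf, htd);
        created.close();

        // Re-open an existing region; again, no explicit initialize() call.
        HRegion opened = HRegion.openHRegion(conf, fs, rootDir, hri, htd, null /* WAL */);
        opened.close();
      }
    }
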
org.apache.hadoop.hbase.master.cleaner.BaseLogCleanerDelegate.isLogDeletable(FileStatus)
org.apache.hadoop.hbase.coprocessor.RegionObserver.postCompact(ObserverContext, Store, StoreFile)
org.apache.hadoop.hbase.coprocessor.RegionObserver.postCompactSelection(ObserverContext, Store, ImmutableList)
org.apache.hadoop.hbase.coprocessor.RegionObserver.postFlush(ObserverContext)
org.apache.hadoop.hbase.master.MasterCoprocessorHost.postGetTableDescriptors(List)
org.apache.hadoop.hbase.coprocessor.MasterObserver.postGetTableDescriptors(ObserverContext, List)
  Use postGetTableDescriptors with regex instead.
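
The replacement hook carries the request's regex filter as an additional parameter. A minimal sketch of an observer overriding it, assuming the 1.0-era signature postGetTableDescriptors(ObserverContext<MasterCoprocessorEnvironment>, List<TableName>, List<HTableDescriptor>, String); the observer class name is hypothetical:

    import java.io.IOException;
    import java.util.List;
    import org.apache.hadoop.hbase.HTableDescriptor;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.coprocessor.BaseMasterObserver;
    import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
    import org.apache.hadoop.hbase.coprocessor.ObserverContext;

    public class DescriptorAuditObserver extends BaseMasterObserver {
      // Replacement for the deprecated (ObserverContext, List) overload; the
      // extra String argument is the regex the client filtered tables with.
      @Override
      public void postGetTableDescriptors(ObserverContext<MasterCoprocessorEnvironment> ctx,
          List<TableName> tableNamesList, List<HTableDescriptor> descriptors,
          String regex) throws IOException {
        for (HTableDescriptor htd : descriptors) {
          System.out.println("returned descriptor: " + htd.getTableName());
        }
      }
    }
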
org.apache.hadoop.hbase.coprocessor.RegionObserver.postIncrementColumnValue(ObserverContext, byte[], byte[], byte[], long, boolean, long)
  This hook is no longer called by the RegionServer.
org.apache.hadoop.hbase.coprocessor.RegionObserver.postSplit(ObserverContext, HRegion, HRegion)
  Use postCompleteSplit() instead.
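
A minimal sketch of the replacement hook, assuming BaseRegionObserver and the single-argument postCompleteSplit(ObserverContext<RegionCoprocessorEnvironment>) signature; the observer class name is hypothetical:

    import java.io.IOException;
    import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
    import org.apache.hadoop.hbase.coprocessor.ObserverContext;
    import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;

    public class SplitAuditObserver extends BaseRegionObserver {
      // Runs after the split request has been processed; replaces the
      // deprecated postSplit(ObserverContext, HRegion, HRegion).
      @Override
      public void postCompleteSplit(ObserverContext<RegionCoprocessorEnvironment> ctx)
          throws IOException {
        System.out.println("split completed in "
            + ctx.getEnvironment().getRegion().getRegionInfo().getRegionNameAsString());
      }
    }
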
org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost.postWALRestore(HRegionInfo, HLogKey, WALEdit)
org.apache.hadoop.hbase.coprocessor.RegionObserver.postWALRestore(ObserverContext, HRegionInfo, HLogKey, WALEdit)
org.apache.hadoop.hbase.coprocessor.WALObserver.postWALWrite(ObserverContext, HRegionInfo, HLogKey, WALEdit)
org.apache.hadoop.hbase.coprocessor.RegionObserver.preCompact(ObserverContext, Store, InternalScanner, ScanType)
org.apache.hadoop.hbase.coprocessor.RegionObserver.preCompactScannerOpen(ObserverContext, Store, List<? extends KeyValueScanner>, ScanType, long, InternalScanner)
org.apache.hadoop.hbase.coprocessor.RegionObserver.preCompactSelection(ObserverContext, Store, List)
org.apache.hadoop.hbase.coprocessor.RegionObserver.preFlush(ObserverContext)
org.apache.hadoop.hbase.master.MasterCoprocessorHost.preGetTableDescriptors(List, List)
org.apache.hadoop.hbase.coprocessor.MasterObserver.preGetTableDescriptors(ObserverContext, List, List)
  Use preGetTableDescriptors with regex instead.
org.apache.hadoop.hbase.coprocessor.RegionObserver.preIncrementColumnValue(ObserverContext, byte[], byte[], byte[], long, boolean)
  This hook is no longer called by the RegionServer.
org.apache.hadoop.hbase.coprocessor.RegionObserver.preSplit(ObserverContext)
  Use preSplit(final ObserverContext<RegionCoprocessorEnvironment> c, byte[] splitRow) instead.
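
The replacement preSplit receives the requested split row, so the observer can inspect the split point (or veto it via ctx.bypass()). A minimal sketch; the observer class name is hypothetical:

    import java.io.IOException;
    import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
    import org.apache.hadoop.hbase.coprocessor.ObserverContext;
    import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
    import org.apache.hadoop.hbase.util.Bytes;

    public class SplitPointObserver extends BaseRegionObserver {
      // Replacement for the no-argument preSplit(ObserverContext): the split
      // row for the pending split is passed in.
      @Override
      public void preSplit(ObserverContext<RegionCoprocessorEnvironment> c,
          byte[] splitRow) throws IOException {
        System.out.println("about to split at " + Bytes.toStringBinary(splitRow));
      }
    }
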
org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost.preWALRestore(HRegionInfo, HLogKey, WALEdit)
org.apache.hadoop.hbase.coprocessor.RegionObserver.preWALRestore(ObserverContext, HRegionInfo, HLogKey, WALEdit)
org.apache.hadoop.hbase.coprocessor.WALObserver.preWALWrite(ObserverContext, HRegionInfo, HLogKey, WALEdit)
org.apache.hadoop.hbase.io.Reference.readFields(DataInput)
  Writables are going away; use the pb serialization methods instead. This method exists only to migrate old Reference files written with Writables before 0.96 and will be removed in a release after 0.96 goes out.
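
A minimal sketch of a pb-based round trip, assuming the branch-1 helpers Reference.createTopReference(byte[]), Reference.write(FileSystem, Path), and Reference.read(FileSystem, Path); the file path and split row are hypothetical:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.io.Reference;
    import org.apache.hadoop.hbase.util.Bytes;

    public class ReferencePbRoundTrip {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        FileSystem fs = FileSystem.get(conf);
        Path p = new Path("/tmp/ref.test");  // hypothetical path

        // Write a half-store-file reference in the protobuf format.
        Reference top = Reference.createTopReference(Bytes.toBytes("splitrow"));
        top.write(fs, p);

        // read() understands pb-serialized files (and, during migration,
        // falls back to the old Writable format that readFields() handled).
        Reference roundTripped = Reference.read(fs, p);
        System.out.println(roundTripped);
      }
    }
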
org.apache.hadoop.hbase.io.hfile.HFileScanner.reseekTo(byte[])
org.apache.hadoop.hbase.io.hfile.HFileScanner.reseekTo(byte[], int, int)
org.apache.hadoop.hbase.mapreduce.TableInputFormatBase.reverseDNS(InetAddress)
  Mistakenly made public in 0.98.7; its scope will change to package-private.
org.apache.hadoop.hbase.io.hfile.HFileScanner.seekBefore(byte[])
org.apache.hadoop.hbase.io.hfile.HFileScanner.seekBefore(byte[], int, int)
org.apache.hadoop.hbase.io.hfile.HFileScanner.seekTo(byte[])
org.apache.hadoop.hbase.io.hfile.HFileScanner.seekTo(byte[], int, int)
org.apache.hadoop.hbase.http.HttpServer.Builder.setBindAddress(String)
org.apache.hadoop.hbase.mapred.TableInputFormatBase.setHTable(HTable)
org.apache.hadoop.hbase.mapreduce.TableInputFormatBase.setHTable(HTable)
org.apache.hadoop.hbase.mapreduce.TableRecordReader.setHTable(Table)
  Use setTable() instead.
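
A minimal sketch of the replacement, assuming setTable(Table) accepts any Table obtained from a Connection; the table name is hypothetical:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Scan;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.mapreduce.TableRecordReader;

    public class RecordReaderSetup {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Obtain the Table through the Connection API rather than an HTable.
        try (Connection connection = ConnectionFactory.createConnection(conf)) {
          Table table = connection.getTable(TableName.valueOf("t1"));
          TableRecordReader reader = new TableRecordReader();
          reader.setTable(table);  // replaces the deprecated setHTable(Table)
          reader.setScan(new Scan());
        }
      }
    }
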
org.apache.hadoop.hbase.http.HttpServer.Builder.setName(String)
org.apache.hadoop.hbase.http.HttpServer.Builder.setPort(int)
org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles.tryAtomicRegionLoad(HConnection, byte[], byte[], Collection)
org.apache.hadoop.hbase.regionserver.wal.HLogKey.write(DataOutput)