public class EsHiveInputFormat extends EsInputFormat<org.apache.hadoop.io.Text,org.apache.hadoop.io.Writable>
FileInputFormat (NOTE: the sentence surrounding this reference was lost in extraction — presumably part of the class description relating this format to Hadoop's FileInputFormat; verify against the original Javadoc)
Nested classes/interfaces inherited from class EsInputFormat: EsInputFormat.AbstractWritableEsInputRecordReader&lt;V&gt;, EsInputFormat.EsInputRecordReader&lt;K,V&gt;, EsInputFormat.EsInputSplit, EsInputFormat.JsonWritableEsInputRecordReader, EsInputFormat.WritableEsInputRecordReader
Constructor and Description |
---|
EsHiveInputFormat() |
Modifier and Type | Method and Description |
---|---|
EsInputFormat.AbstractWritableEsInputRecordReader |
getRecordReader(org.apache.hadoop.mapred.InputSplit split,
org.apache.hadoop.mapred.JobConf job,
org.apache.hadoop.mapred.Reporter reporter) |
org.apache.hadoop.mapred.FileSplit[] |
getSplits(org.apache.hadoop.mapred.JobConf job,
int numSplits) |
Methods inherited from class EsInputFormat: createRecordReader, getSplits, isOutputAsJson
public org.apache.hadoop.mapred.FileSplit[] getSplits(org.apache.hadoop.mapred.JobConf job, int numSplits) throws java.io.IOException
Specified by: getSplits in interface org.apache.hadoop.mapred.InputFormat&lt;org.apache.hadoop.io.Text,org.apache.hadoop.io.Writable&gt;
Overrides: getSplits in class EsInputFormat&lt;org.apache.hadoop.io.Text,org.apache.hadoop.io.Writable&gt;
Throws: java.io.IOException
public EsInputFormat.AbstractWritableEsInputRecordReader getRecordReader(org.apache.hadoop.mapred.InputSplit split, org.apache.hadoop.mapred.JobConf job, org.apache.hadoop.mapred.Reporter reporter)
Specified by: getRecordReader in interface org.apache.hadoop.mapred.InputFormat&lt;org.apache.hadoop.io.Text,org.apache.hadoop.io.Writable&gt;
Overrides: getRecordReader in class EsInputFormat&lt;org.apache.hadoop.io.Text,org.apache.hadoop.io.Writable&gt;