public class HiveBatchAndStreamTableSource extends Object implements org.apache.flink.table.sources.StreamTableSource<org.apache.flink.table.data.RowData>, org.apache.flink.table.sources.PartitionableTableSource, org.apache.flink.table.sources.ProjectableTableSource<org.apache.flink.table.data.RowData>, org.apache.flink.table.sources.LimitableTableSource<org.apache.flink.table.data.RowData>, org.apache.flink.table.sources.LookupableTableSource<org.apache.flink.table.data.RowData>
| Constructor and Description |
| --- |
HiveBatchAndStreamTableSource(org.apache.hadoop.mapred.JobConf jobConf,
org.apache.flink.configuration.ReadableConfig flinkConf,
org.apache.flink.table.catalog.ObjectPath tablePath,
org.apache.flink.table.catalog.CatalogTable catalogTable) |
| Modifier and Type | Method and Description |
| --- | --- |
org.apache.flink.table.sources.TableSource<org.apache.flink.table.data.RowData> |
applyLimit(long limit) |
org.apache.flink.table.sources.TableSource<org.apache.flink.table.data.RowData> |
applyPartitionPruning(List<Map<String,String>> remainingPartitions) |
String |
explainSource() |
org.apache.flink.table.functions.AsyncTableFunction<org.apache.flink.table.data.RowData> |
getAsyncLookupFunction(String[] lookupKeys) |
org.apache.flink.streaming.api.datastream.DataStream<org.apache.flink.table.data.RowData> |
getDataStream(org.apache.flink.streaming.api.environment.StreamExecutionEnvironment execEnv) |
org.apache.flink.connectors.hive.read.HiveTableInputFormat |
getInputFormat() |
org.apache.flink.table.functions.TableFunction<org.apache.flink.table.data.RowData> |
getLookupFunction(String[] lookupKeys) |
List<Map<String,String>> |
getPartitions() |
org.apache.flink.table.types.DataType |
getProducedDataType() |
org.apache.flink.table.api.TableSchema |
getTableSchema() |
boolean |
isAsyncEnabled() |
boolean |
isBounded() |
boolean |
isLimitPushedDown() |
org.apache.flink.table.sources.TableSource<org.apache.flink.table.data.RowData> |
projectFields(int[] fields) |
static org.apache.flink.connectors.hive.HiveTablePartition |
toHiveTablePartition(List<String> partitionKeys,
String[] fieldNames,
org.apache.flink.table.types.DataType[] fieldTypes,
org.apache.flink.table.catalog.hive.client.HiveShim shim,
Properties tableProps,
String defaultPartitionName,
org.apache.hadoop.hive.metastore.api.Partition partition) |
public HiveBatchAndStreamTableSource(org.apache.hadoop.mapred.JobConf jobConf, org.apache.flink.configuration.ReadableConfig flinkConf, org.apache.flink.table.catalog.ObjectPath tablePath, org.apache.flink.table.catalog.CatalogTable catalogTable)
public boolean isBounded()
Specified by: isBounded in interface org.apache.flink.table.sources.StreamTableSource&lt;org.apache.flink.table.data.RowData&gt;
public org.apache.flink.streaming.api.datastream.DataStream<org.apache.flink.table.data.RowData> getDataStream(org.apache.flink.streaming.api.environment.StreamExecutionEnvironment execEnv)
Specified by: getDataStream in interface org.apache.flink.table.sources.StreamTableSource&lt;org.apache.flink.table.data.RowData&gt;
public org.apache.flink.connectors.hive.read.HiveTableInputFormat getInputFormat()
public org.apache.flink.table.api.TableSchema getTableSchema()
Specified by: getTableSchema in interface org.apache.flink.table.sources.TableSource&lt;org.apache.flink.table.data.RowData&gt;
public org.apache.flink.table.types.DataType getProducedDataType()
Specified by: getProducedDataType in interface org.apache.flink.table.sources.TableSource&lt;org.apache.flink.table.data.RowData&gt;
public boolean isLimitPushedDown()
Specified by: isLimitPushedDown in interface org.apache.flink.table.sources.LimitableTableSource&lt;org.apache.flink.table.data.RowData&gt;
public org.apache.flink.table.sources.TableSource<org.apache.flink.table.data.RowData> applyLimit(long limit)
Specified by: applyLimit in interface org.apache.flink.table.sources.LimitableTableSource&lt;org.apache.flink.table.data.RowData&gt;
public List<Map<String,String>> getPartitions()
Specified by: getPartitions in interface org.apache.flink.table.sources.PartitionableTableSource
public org.apache.flink.table.sources.TableSource<org.apache.flink.table.data.RowData> applyPartitionPruning(List<Map<String,String>> remainingPartitions)
Specified by: applyPartitionPruning in interface org.apache.flink.table.sources.PartitionableTableSource
public org.apache.flink.table.sources.TableSource<org.apache.flink.table.data.RowData> projectFields(int[] fields)
Specified by: projectFields in interface org.apache.flink.table.sources.ProjectableTableSource&lt;org.apache.flink.table.data.RowData&gt;
public static org.apache.flink.connectors.hive.HiveTablePartition toHiveTablePartition(List<String> partitionKeys, String[] fieldNames, org.apache.flink.table.types.DataType[] fieldTypes, org.apache.flink.table.catalog.hive.client.HiveShim shim, Properties tableProps, String defaultPartitionName, org.apache.hadoop.hive.metastore.api.Partition partition)
public String explainSource()
Specified by: explainSource in interface org.apache.flink.table.sources.TableSource&lt;org.apache.flink.table.data.RowData&gt;
public org.apache.flink.table.functions.TableFunction<org.apache.flink.table.data.RowData> getLookupFunction(String[] lookupKeys)
Specified by: getLookupFunction in interface org.apache.flink.table.sources.LookupableTableSource&lt;org.apache.flink.table.data.RowData&gt;
public org.apache.flink.table.functions.AsyncTableFunction<org.apache.flink.table.data.RowData> getAsyncLookupFunction(String[] lookupKeys)
Specified by: getAsyncLookupFunction in interface org.apache.flink.table.sources.LookupableTableSource&lt;org.apache.flink.table.data.RowData&gt;
public boolean isAsyncEnabled()
Specified by: isAsyncEnabled in interface org.apache.flink.table.sources.LookupableTableSource&lt;org.apache.flink.table.data.RowData&gt;
Copyright © 2021 Alibaba Group. All rights reserved.