Instance Constructors
-
new
FSGraphSource(rootPath: String, tableStorageFormat: StorageFormat, hiveDatabaseName: Option[String] = None, filesPerTable: Option[Int] = None)(implicit caps: CAPSSession)
Value Members
-
final
def
!=(arg0: Any): Boolean
-
final
def
##(): Int
-
final
def
==(arg0: Any): Boolean
-
final
def
asInstanceOf[T0]: T0
-
def
checkStorable(name: GraphName): Unit
-
def
clone(): AnyRef
-
def
delete(graphName: GraphName): Unit
-
def
deleteDirectory(path: String): Unit
-
def
deleteGraph(graphName: GraphName): Unit
-
final
def
eq(arg0: AnyRef): Boolean
-
def
equals(arg0: Any): Boolean
-
lazy val
fileSystem: FileSystem
-
val
filesPerTable: Option[Int]
-
def
finalize(): Unit
-
final
def
getClass(): Class[_]
-
def
graph(name: GraphName): PropertyGraph
-
var
graphNameCache: Set[GraphName]
-
def
graphNames: Set[GraphName]
-
def
hasGraph(graphName: GraphName): Boolean
-
def
hashCode(): Int
-
val
hiveDatabaseName: Option[String]
-
final
def
isInstanceOf[T0]: Boolean
-
def
listDirectories(path: String): List[String]
-
def
listGraphNames: List[String]
-
final
def
ne(arg0: AnyRef): Boolean
-
final
def
notify(): Unit
-
final
def
notifyAll(): Unit
-
def
readCAPSGraphMetaData(graphName: GraphName): CAPSGraphMetaData
-
def
readFile(path: String): String
-
def
readJsonCAPSGraphMetaData(graphName: GraphName): String
-
def
readJsonSchema(graphName: GraphName): String
-
def
readNodeTable(graphName: GraphName, labels: Set[String], sparkSchema: StructType): DataFrame
-
def
readRelationshipTable(graphName: GraphName, relKey: String, sparkSchema: StructType): DataFrame
-
def
readSchema(graphName: GraphName): CAPSSchema
-
def
readTable(path: String, schema: StructType): DataFrame
-
val
rootPath: String
-
def
schema(graphName: GraphName): Option[CAPSSchema]
-
var
schemaCache: Map[GraphName, CAPSSchema]
-
def
store(graphName: GraphName, graph: PropertyGraph): Unit
-
final
def
synchronized[T0](arg0: ⇒ T0): T0
-
def
toString(): String
-
final
def
wait(): Unit
-
final
def
wait(arg0: Long, arg1: Int): Unit
-
final
def
wait(arg0: Long): Unit
-
def
waitForWriteCompletion(writeFutures: Set[Future[Unit]])(implicit ec: ExecutionContext): Unit
-
def
writeCAPSGraphMetaData(graphName: GraphName, capsGraphMetaData: CAPSGraphMetaData): Unit
-
def
writeFile(path: String, content: String): Unit
-
def
writeJsonCAPSGraphMetaData(graphName: GraphName, capsGraphMetaData: String): Unit
-
def
writeJsonSchema(graphName: GraphName, schema: String): Unit
-
def
writeNodeTable(graphName: GraphName, labels: Set[String], table: DataFrame): Unit
-
def
writeRelationshipTable(graphName: GraphName, relKey: String, table: DataFrame): Unit
-
def
writeSchema(graphName: GraphName, schema: CAPSSchema): Unit
-
def
writeTable(path: String, table: DataFrame): Unit
Inherited from PropertyGraphDataSource
Inherited from AnyRef
Inherited from Any
Data source implementation that handles writing files and tables to a filesystem.
By default, Spark is used to write tables, and the Hadoop filesystem configured in Spark is used to write files. The file/folder/table structure into which the graphs are stored is defined in DefaultGraphDirectoryStructure.