public class CreateMetastoreDataSource
extends org.apache.spark.sql.catalyst.plans.logical.Command
implements org.apache.spark.sql.execution.RunnableCommand, scala.Product, scala.Serializable
Constructor Summary:
CreateMetastoreDataSource(String tableName,
                          scala.Option<StructType> userSpecifiedSchema,
                          String provider,
                          scala.collection.immutable.Map<String,String> options,
                          boolean allowExisting,
                          boolean managedIfNoPath)
Method Summary (modifier and type — method):
boolean allowExisting()
boolean managedIfNoPath()
scala.collection.immutable.Map<String,String> options()
String provider()
scala.collection.Seq<Row> run(SQLContext sqlContext)
String tableName()
scala.Option<StructType> userSpecifiedSchema()
Methods inherited from org.apache.spark.sql.catalyst.plans.logical.LogicalPlan: childrenResolved, cleanArgs, isTraceEnabled, log, logDebug, logDebug, logError, logError, logInfo, logInfo, logName, logTrace, logTrace, logWarning, logWarning, org$apache$spark$Logging$$log__$eq, org$apache$spark$Logging$$log_, org$apache$spark$sql$catalyst$plans$logical$LogicalPlan$$resolveAsColumn, org$apache$spark$sql$catalyst$plans$logical$LogicalPlan$$resolveAsTableColumn, resolve, resolve, resolve$default$3, resolveChildren, resolveChildren$default$3, resolved, resolveGetField, sameResult, statePrefix, statistics
Methods inherited from org.apache.spark.sql.catalyst.plans.QueryPlan: expressions, inputSet, missingInput, org$apache$spark$sql$catalyst$plans$QueryPlan$$transformExpressionDown$1, org$apache$spark$sql$catalyst$plans$QueryPlan$$transformExpressionUp$1, outputSet, printSchema, references, schema, schemaString, simpleString, transformAllExpressions, transformExpressions, transformExpressionsDown, transformExpressionsUp
Methods inherited (presumably from org.apache.spark.sql.catalyst.trees.TreeNode — origin class label lost in extraction): apply, argString, asCode, collect, fastEquals, flatMap, foreach, foreachUp, generateTreeString, getNodeNumbered, makeCopy, map, mapChildren, nodeName, numberedTreeString, origin, otherCopyArgs, stringArgs, toString, transform, transformChildrenDown, transformChildrenUp, transformDown, transformUp, treeString, withNewChildren
Methods inherited from interface scala.Product: productArity, productElement, productIterator, productPrefix
Methods inherited (presumably from org.apache.spark.Logging — origin label lost in extraction): initializeIfNecessary, initializeLogging, log_
public CreateMetastoreDataSource(String tableName, scala.Option<StructType> userSpecifiedSchema, String provider, scala.collection.immutable.Map<String,String> options, boolean allowExisting, boolean managedIfNoPath)
public String tableName()
public scala.Option<StructType> userSpecifiedSchema()
public String provider()
public scala.collection.immutable.Map<String,String> options()
public boolean allowExisting()
public boolean managedIfNoPath()
public scala.collection.Seq<Row> run(SQLContext sqlContext)
Specified by: run in interface org.apache.spark.sql.execution.RunnableCommand