org.apache.spark.sql.catalyst.plans.logical.Command Scala Examples
The following examples show how to use org.apache.spark.sql.catalyst.plans.logical.Command.
Each example notes the project it comes from and the license of its original source file.
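Before the examples, here is a minimal sketch of the pattern most of them share: a case class that mixes in Command so Catalyst treats it as an eagerly executed plan node with no children and no output. The class name and field below are hypothetical, and the sketch assumes a Spark 2.x-era Catalyst API in which Command extends LogicalPlan.

import org.apache.spark.sql.catalyst.expressions.Attribute
import org.apache.spark.sql.catalyst.plans.logical.{Command, LogicalPlan}

// Hypothetical command: a leaf plan node that produces no rows of its own.
case class ShowGreetingCommand(greeting: String) extends Command {
  override def output: Seq[Attribute] = Seq.empty
  override def children: Seq[LogicalPlan] = Seq.empty
}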
Example 1
Source File: ddl.scala From drizzle-spark with Apache License 2.0
package org.apache.spark.sql.execution.datasources

import org.apache.spark.sql._
import org.apache.spark.sql.catalyst.TableIdentifier
import org.apache.spark.sql.catalyst.catalog.CatalogTable
import org.apache.spark.sql.catalyst.plans.QueryPlan
import org.apache.spark.sql.catalyst.plans.logical.{Command, LogicalPlan}
import org.apache.spark.sql.execution.command.RunnableCommand
import org.apache.spark.sql.types._

case class CreateTable(
    tableDesc: CatalogTable,
    mode: SaveMode,
    query: Option[LogicalPlan]) extends Command {
  assert(tableDesc.provider.isDefined, "The table to be created must have a provider.")

  if (query.isEmpty) {
    assert(
      mode == SaveMode.ErrorIfExists || mode == SaveMode.Ignore,
      "create table without data insertion can only use ErrorIfExists or Ignore as SaveMode.")
  }

  override def innerChildren: Seq[QueryPlan[_]] = query.toSeq
}

class CaseInsensitiveMap(map: Map[String, String]) extends Map[String, String]
  with Serializable {

  val baseMap = map.map(kv => kv.copy(_1 = kv._1.toLowerCase))

  override def get(k: String): Option[String] = baseMap.get(k.toLowerCase)

  override def + [B1 >: String](kv: (String, B1)): Map[String, B1] =
    baseMap + kv.copy(_1 = kv._1.toLowerCase)

  override def iterator: Iterator[(String, String)] = baseMap.iterator

  override def -(key: String): Map[String, String] = baseMap - key.toLowerCase
}
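As a quick illustration (the keys and values below are made up), lookups against CaseInsensitiveMap ignore the case of the key:

val opts = new CaseInsensitiveMap(Map("Path" -> "/tmp/data", "Header" -> "true"))

opts.get("PATH")    // Some("/tmp/data")
opts.get("header")  // Some("true")
opts.get("missing") // None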
Example 2
Source File: DataWritingCommand.scala From XSQL with Apache License 2.0
package org.apache.spark.sql.execution.command

import org.apache.hadoop.conf.Configuration

import org.apache.spark.sql.{Row, SparkSession}
import org.apache.spark.sql.catalyst.expressions.Attribute
import org.apache.spark.sql.catalyst.plans.logical.{Command, LogicalPlan}
import org.apache.spark.sql.execution.SparkPlan
import org.apache.spark.sql.execution.datasources.BasicWriteJobStatsTracker
import org.apache.spark.sql.execution.datasources.FileFormatWriter
import org.apache.spark.sql.execution.metric.SQLMetric
import org.apache.spark.util.SerializableConfiguration

// Excerpt: the listing shows only this helper; the enclosing companion object is
// reconstructed here so the snippet compiles on its own.
object DataWritingCommand {

  def logicalPlanOutputWithNames(
      query: LogicalPlan,
      names: Seq[String]): Seq[Attribute] = {
    // Save the output attributes to a variable to avoid duplicated function calls.
    val outputAttributes = query.output
    assert(outputAttributes.length == names.length,
      "The length of provided names doesn't match the length of output attributes.")
    outputAttributes.zip(names).map { case (attr, outputName) =>
      attr.withName(outputName)
    }
  }
}
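As a hypothetical usage sketch (the LocalRelation plan and the column names below are illustrative, not part of the original example), the helper renames the output attributes of any logical plan:

import org.apache.spark.sql.catalyst.expressions.AttributeReference
import org.apache.spark.sql.catalyst.plans.logical.LocalRelation
import org.apache.spark.sql.types.IntegerType

// A two-column in-memory relation used purely for illustration.
val plan = LocalRelation(
  AttributeReference("a", IntegerType)(),
  AttributeReference("b", IntegerType)())

val renamed = DataWritingCommand.logicalPlanOutputWithNames(plan, Seq("x", "y"))
// renamed.map(_.name) == Seq("x", "y")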
Example 3
Source File: CreateTablePartitionedByUsing.scala From HANAVora-Extensions with Apache License 2.0
package org.apache.spark.sql.execution.datasources

import org.apache.spark.sql.catalyst.TableIdentifier
import org.apache.spark.sql.catalyst.expressions.Attribute
import org.apache.spark.sql.catalyst.plans.logical.{Command, LogicalPlan}
import org.apache.spark.sql.types.StructType

case class CreateTablePartitionedByUsing(
    tableIdent: TableIdentifier,
    userSpecifiedSchema: Option[StructType],
    provider: String,
    partitioningFunc: String,
    partitioningColumns: Seq[String],
    temporary: Boolean,
    options: Map[String, String],
    allowExisting: Boolean,
    managedIfNoPath: Boolean)
  extends LogicalPlan with Command {

  override def output: Seq[Attribute] = Seq.empty

  override def children: Seq[LogicalPlan] = Seq.empty
}
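As an illustrative sketch (the table name, provider, and partitioning values below are hypothetical), the plan node is a plain data holder and can be constructed directly:

import org.apache.spark.sql.catalyst.TableIdentifier

val plan = CreateTablePartitionedByUsing(
  tableIdent = TableIdentifier("sales"),
  userSpecifiedSchema = None,
  provider = "com.sap.spark.vora",        // hypothetical data source provider
  partitioningFunc = "hashPartitionFn",   // hypothetical partitioning function name
  partitioningColumns = Seq("region"),
  temporary = false,
  options = Map.empty,
  allowExisting = false,
  managedIfNoPath = true)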
Example 4
Source File: ddl.scala From sparkoscope with Apache License 2.0
package org.apache.spark.sql.execution.datasources

import org.apache.spark.sql._
import org.apache.spark.sql.catalyst.TableIdentifier
import org.apache.spark.sql.catalyst.catalog.{CatalogTable, CatalogUtils}
import org.apache.spark.sql.catalyst.plans.QueryPlan
import org.apache.spark.sql.catalyst.plans.logical.{Command, LogicalPlan}
import org.apache.spark.sql.execution.command.RunnableCommand
import org.apache.spark.sql.types._

case class CreateTable(
    tableDesc: CatalogTable,
    mode: SaveMode,
    query: Option[LogicalPlan]) extends Command {
  assert(tableDesc.provider.isDefined, "The table to be created must have a provider.")

  if (query.isEmpty) {
    assert(
      mode == SaveMode.ErrorIfExists || mode == SaveMode.Ignore,
      "create table without data insertion can only use ErrorIfExists or Ignore as SaveMode.")
  }

  override def innerChildren: Seq[QueryPlan[_]] = query.toSeq
}

case class CreateTempViewUsing(
    tableIdent: TableIdentifier,
    userSpecifiedSchema: Option[StructType],
    replace: Boolean,
    global: Boolean,
    provider: String,
    options: Map[String, String]) extends RunnableCommand {

  if (tableIdent.database.isDefined) {
    throw new AnalysisException(
      s"Temporary view '$tableIdent' should not have specified a database")
  }

  override def argString: String = {
    s"[tableIdent:$tableIdent " +
      userSpecifiedSchema.map(_ + " ").getOrElse("") +
      s"replace:$replace " +
      s"provider:$provider " +
      CatalogUtils.maskCredentials(options)
  }

  def run(sparkSession: SparkSession): Seq[Row] = {
    val dataSource = DataSource(
      sparkSession,
      userSpecifiedSchema = userSpecifiedSchema,
      className = provider,
      options = options)

    val catalog = sparkSession.sessionState.catalog
    val viewDefinition = Dataset.ofRows(
      sparkSession, LogicalRelation(dataSource.resolveRelation())).logicalPlan

    if (global) {
      catalog.createGlobalTempView(tableIdent.table, viewDefinition, replace)
    } else {
      catalog.createTempView(tableIdent.table, viewDefinition, replace)
    }

    Seq.empty[Row]
  }
}

case class RefreshTable(tableIdent: TableIdentifier) extends RunnableCommand {

  override def run(sparkSession: SparkSession): Seq[Row] = {
    // Refresh the given table's metadata. If this table is cached as an InMemoryRelation,
    // drop the original cached version and make the new version cached lazily.
    sparkSession.catalog.refreshTable(tableIdent.quotedString)
    Seq.empty[Row]
  }
}

case class RefreshResource(path: String) extends RunnableCommand {

  override def run(sparkSession: SparkSession): Seq[Row] = {
    sparkSession.catalog.refreshByPath(path)
    Seq.empty[Row]
  }
}
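A hedged usage sketch for the two refresh commands (the session settings, table name, and path below are illustrative, not part of the original example):

import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.catalyst.TableIdentifier

val spark = SparkSession.builder()
  .appName("refresh-demo")
  .master("local[*]")
  .getOrCreate()

spark.range(10).write.saveAsTable("demo_table")  // hypothetical table

// Re-read the table's metadata and invalidate any cached version.
RefreshTable(TableIdentifier("demo_table")).run(spark)

// Refresh everything cached under a path (path is illustrative).
RefreshResource("/tmp/demo_path").run(spark)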
Example 5
Source File: CarbonMVRules.scala From carbondata with Apache License 2.0
package org.apache.spark.sql.hive

import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.catalyst.plans.logical.{Command, LogicalPlan}
import org.apache.spark.sql.catalyst.rules.Rule
import org.apache.spark.util.CarbonReflectionUtils

case class CarbonMVRules(sparkSession: SparkSession) extends Rule[LogicalPlan] {

  // Load the MV analyzer rule reflectively; fall back to null when the MV extension
  // module is not on the classpath.
  val mvPlan = try {
    CarbonReflectionUtils.createObject(
      "org.apache.carbondata.mv.extension.MVAnalyzerRule",
      sparkSession)._1.asInstanceOf[Rule[LogicalPlan]]
  } catch {
    case e: Exception => null
  }

  override def apply(plan: LogicalPlan): LogicalPlan = {
    plan match {
      // Commands are executed eagerly and are never rewritten against materialized views.
      case _: Command => plan
      case _ =>
        if (mvPlan != null) {
          mvPlan.apply(plan)
        } else {
          plan
        }
    }
  }
}
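The guard on Command is the reusable part of this rule. A minimal sketch of the same pattern with no Carbon dependencies follows; the rule name is hypothetical and the body is a placeholder rather than a real rewrite.

import org.apache.spark.sql.catalyst.plans.logical.{Command, LogicalPlan}
import org.apache.spark.sql.catalyst.rules.Rule

// Hypothetical rule: leaves Command plans untouched; a real rule would rewrite `other`.
object SkipCommandsRule extends Rule[LogicalPlan] {
  override def apply(plan: LogicalPlan): LogicalPlan = plan match {
    case _: Command => plan
    case other => other
  }
}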
Example 6
Source File: CarbonExplainCommand.scala From carbondata with Apache License 2.0
package org.apache.spark.sql.execution.command.table

import org.apache.spark.sql.{Row, SparkSession}
import org.apache.spark.sql.catalyst.expressions.{Attribute, AttributeReference}
import org.apache.spark.sql.catalyst.plans.logical.{Command, LogicalPlan, Union}
import org.apache.spark.sql.execution.command.{ExplainCommand, MetadataCommand}
import org.apache.spark.sql.types.StringType

import org.apache.carbondata.core.profiler.ExplainCollector

case class CarbonExplainCommand(
    child: LogicalPlan,
    override val output: Seq[Attribute] =
      Seq(AttributeReference("plan", StringType, nullable = true)()))
  extends MetadataCommand {

  override def processMetadata(sparkSession: SparkSession): Seq[Row] = {
    val explainCommand = child.asInstanceOf[ExplainCommand]
    setAuditInfo(Map("query" -> explainCommand.logicalPlan.simpleString))
    val isCommand = explainCommand.logicalPlan match {
      case _: Command => true
      case Union(children) if children.forall(_.isInstanceOf[Command]) => true
      case _ => false
    }

    if (explainCommand.logicalPlan.isStreaming || isCommand) {
      explainCommand.run(sparkSession)
    } else {
      CarbonExplainCommand.collectProfiler(explainCommand, sparkSession) ++
        explainCommand.run(sparkSession)
    }
  }

  override protected def opName: String = "EXPLAIN"
}

case class CarbonInternalExplainCommand(
    explainCommand: ExplainCommand,
    override val output: Seq[Attribute] =
      Seq(AttributeReference("plan", StringType, nullable = true)()))
  extends MetadataCommand {

  override def processMetadata(sparkSession: SparkSession): Seq[Row] = {
    CarbonExplainCommand
      .collectProfiler(explainCommand, sparkSession) ++ explainCommand.run(sparkSession)
  }

  override protected def opName: String = "Carbon EXPLAIN"
}

object CarbonExplainCommand {

  def collectProfiler(
      explain: ExplainCommand,
      sparkSession: SparkSession): Seq[Row] = {
    try {
      ExplainCollector.setup()
      if (ExplainCollector.enabled()) {
        val queryExecution =
          sparkSession.sessionState.executePlan(explain.logicalPlan)
        queryExecution.toRdd.partitions
        // For count(*) queries the explain collector will be disabled, so profiler
        // information is not required in such scenarios.
        if (null == ExplainCollector.getFormatedOutput) {
          Seq.empty
        }
        Seq(Row("== CarbonData Profiler ==\n" + ExplainCollector.getFormatedOutput))
      } else {
        Seq.empty
      }
    } finally {
      ExplainCollector.remove()
    }
  }
}
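For reference, here is the `isCommand` check in isolation as a hedged sketch. The helper name is hypothetical, and it assumes a Spark version in which Union carries a single sequence of children, as in the example above.

import org.apache.spark.sql.catalyst.plans.logical.{Command, LogicalPlan, Union}

// True when the plan is a Command, or a Union made up entirely of Commands.
def isCommandPlan(plan: LogicalPlan): Boolean = plan match {
  case _: Command => true
  case Union(children) if children.forall(_.isInstanceOf[Command]) => true
  case _ => false
}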
Example 7
Source File: ddl.scala From multi-tenancy-spark with Apache License 2.0
package org.apache.spark.sql.execution.datasources

import org.apache.spark.sql._
import org.apache.spark.sql.catalyst.TableIdentifier
import org.apache.spark.sql.catalyst.catalog.{CatalogTable, CatalogUtils}
import org.apache.spark.sql.catalyst.plans.QueryPlan
import org.apache.spark.sql.catalyst.plans.logical.{Command, LogicalPlan}
import org.apache.spark.sql.execution.command.RunnableCommand
import org.apache.spark.sql.types._

case class CreateTable(
    tableDesc: CatalogTable,
    mode: SaveMode,
    query: Option[LogicalPlan]) extends Command {
  assert(tableDesc.provider.isDefined, "The table to be created must have a provider.")

  if (query.isEmpty) {
    assert(
      mode == SaveMode.ErrorIfExists || mode == SaveMode.Ignore,
      "create table without data insertion can only use ErrorIfExists or Ignore as SaveMode.")
  }

  override def innerChildren: Seq[QueryPlan[_]] = query.toSeq
}

case class CreateTempViewUsing(
    tableIdent: TableIdentifier,
    userSpecifiedSchema: Option[StructType],
    replace: Boolean,
    global: Boolean,
    provider: String,
    options: Map[String, String]) extends RunnableCommand {

  if (tableIdent.database.isDefined) {
    throw new AnalysisException(
      s"Temporary view '$tableIdent' should not have specified a database")
  }

  override def argString: String = {
    s"[tableIdent:$tableIdent " +
      userSpecifiedSchema.map(_ + " ").getOrElse("") +
      s"replace:$replace " +
      s"provider:$provider " +
      CatalogUtils.maskCredentials(options)
  }

  def run(sparkSession: SparkSession): Seq[Row] = {
    val dataSource = DataSource(
      sparkSession,
      userSpecifiedSchema = userSpecifiedSchema,
      className = provider,
      options = options)

    val catalog = sparkSession.sessionState.catalog
    val viewDefinition = Dataset.ofRows(
      sparkSession, LogicalRelation(dataSource.resolveRelation())).logicalPlan

    if (global) {
      catalog.createGlobalTempView(tableIdent.table, viewDefinition, replace)
    } else {
      catalog.createTempView(tableIdent.table, viewDefinition, replace)
    }

    Seq.empty[Row]
  }
}

case class RefreshTable(tableIdent: TableIdentifier) extends RunnableCommand {

  override def run(sparkSession: SparkSession): Seq[Row] = {
    // Refresh the given table's metadata. If this table is cached as an InMemoryRelation,
    // drop the original cached version and make the new version cached lazily.
    sparkSession.catalog.refreshTable(tableIdent.quotedString)
    Seq.empty[Row]
  }
}

case class RefreshResource(path: String) extends RunnableCommand {

  override def run(sparkSession: SparkSession): Seq[Row] = {
    sparkSession.catalog.refreshByPath(path)
    Seq.empty[Row]
  }
}
Example 8
Source File: SqlParserSuite.scala From iolap with Apache License 2.0
package org.apache.spark.sql.catalyst

import org.apache.spark.SparkFunSuite
import org.apache.spark.sql.catalyst.expressions.Attribute
import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
import org.apache.spark.sql.catalyst.plans.logical.Command

private[sql] case class TestCommand(cmd: String) extends LogicalPlan with Command {
  override def output: Seq[Attribute] = Seq.empty
  override def children: Seq[LogicalPlan] = Seq.empty
}

private[sql] class SuperLongKeywordTestParser extends AbstractSparkSQLParser {
  protected val EXECUTE = Keyword("THISISASUPERLONGKEYWORDTEST")

  override protected lazy val start: Parser[LogicalPlan] = set

  private lazy val set: Parser[LogicalPlan] =
    EXECUTE ~> ident ^^ {
      case fileName => TestCommand(fileName)
    }
}

private[sql] class CaseInsensitiveTestParser extends AbstractSparkSQLParser {
  protected val EXECUTE = Keyword("EXECUTE")

  override protected lazy val start: Parser[LogicalPlan] = set

  private lazy val set: Parser[LogicalPlan] =
    EXECUTE ~> ident ^^ {
      case fileName => TestCommand(fileName)
    }
}

class SqlParserSuite extends SparkFunSuite {

  test("test long keyword") {
    val parser = new SuperLongKeywordTestParser
    assert(TestCommand("NotRealCommand") ===
      parser.parse("ThisIsASuperLongKeyWordTest NotRealCommand"))
  }

  test("test case insensitive") {
    val parser = new CaseInsensitiveTestParser
    assert(TestCommand("NotRealCommand") === parser.parse("EXECUTE NotRealCommand"))
    assert(TestCommand("NotRealCommand") === parser.parse("execute NotRealCommand"))
    assert(TestCommand("NotRealCommand") === parser.parse("exEcute NotRealCommand"))
  }
}
Example 9
Source File: SqlParserSuite.scala From spark1.52 with Apache License 2.0
package org.apache.spark.sql.catalyst

import org.apache.spark.SparkFunSuite
import org.apache.spark.sql.catalyst.expressions.Attribute
import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
import org.apache.spark.sql.catalyst.plans.logical.Command

private[sql] case class TestCommand(cmd: String) extends LogicalPlan with Command {
  override def output: Seq[Attribute] = Seq.empty
  override def children: Seq[LogicalPlan] = Seq.empty
}

private[sql] class SuperLongKeywordTestParser extends AbstractSparkSQLParser {
  protected val EXECUTE = Keyword("THISISASUPERLONGKEYWORDTEST")

  override protected lazy val start: Parser[LogicalPlan] = set

  private lazy val set: Parser[LogicalPlan] =
    EXECUTE ~> ident ^^ {
      case fileName => TestCommand(fileName)
    }
}

private[sql] class CaseInsensitiveTestParser extends AbstractSparkSQLParser {
  protected val EXECUTE = Keyword("EXECUTE")

  override protected lazy val start: Parser[LogicalPlan] = set

  private lazy val set: Parser[LogicalPlan] =
    EXECUTE ~> ident ^^ {
      case fileName => TestCommand(fileName)
    }
}

class SqlParserSuite extends SparkFunSuite {

  test("test long keyword") {
    val parser = new SuperLongKeywordTestParser
    assert(TestCommand("NotRealCommand") ===
      parser.parse("ThisIsASuperLongKeyWordTest NotRealCommand"))
  }

  test("test case insensitive") {
    val parser = new CaseInsensitiveTestParser
    assert(TestCommand("NotRealCommand") === parser.parse("EXECUTE NotRealCommand"))
    assert(TestCommand("NotRealCommand") === parser.parse("execute NotRealCommand"))
    assert(TestCommand("NotRealCommand") === parser.parse("exEcute NotRealCommand"))
  }
}
Example 10
Source File: MergePlan.scala From spark-acid with Apache License 2.0
package org.apache.spark.sql.catalyst.parser.plans.logical

import com.qubole.spark.hiveacid.merge.MergeWhenClause
import org.apache.spark.sql.{SparkSession, SqlUtils}
import org.apache.spark.sql.catalyst.expressions.{Attribute, Expression}
import org.apache.spark.sql.catalyst.plans.logical.{Command, LogicalPlan}

case class MergePlan(
    sourcePlan: LogicalPlan,
    targetPlan: LogicalPlan,
    condition: Expression,
    matched: Seq[MergeWhenClause],
    notMatched: Option[MergeWhenClause]) extends Command {
  override def children: Seq[LogicalPlan] = Seq(sourcePlan, targetPlan)
  override def output: Seq[Attribute] = Seq.empty
}

object MergePlan {

  def resolve(sparkSession: SparkSession, mergePlan: MergePlan): MergePlan = {
    MergeWhenClause.validate(mergePlan.matched ++ mergePlan.notMatched)
    val resolvedCondition = SqlUtils.resolveReferences(sparkSession, mergePlan.condition,
      mergePlan.children, true, None)
    val resolvedMatched = MergeWhenClause.resolve(sparkSession, mergePlan, mergePlan.matched)
    val resolvedNotMatched = mergePlan.notMatched.map { x =>
      x.resolve(sparkSession, mergePlan)
    }
    MergePlan(mergePlan.sourcePlan, mergePlan.targetPlan, resolvedCondition,
      resolvedMatched, resolvedNotMatched)
  }
}