Special Functions - col and lit
Let us understand the special functions col and lit. col converts a column name given as a string into Column type, while lit converts a literal value into Column type.
First, let us create a Data Frame for demo purposes.
Let us start the Spark context for this Notebook so that we can execute the code provided.
from pyspark.sql import SparkSession

import getpass
username = getpass.getuser()

spark = SparkSession. \
    builder. \
    config('spark.ui.port', '0'). \
    config("spark.sql.warehouse.dir", f"/user/{username}/warehouse"). \
    enableHiveSupport(). \
    appName(f'{username} | Python - Processing Column Data'). \
    master('yarn'). \
    getOrCreate()
If you are going to use CLIs, you can launch Spark SQL using one of the following three approaches.
Using Spark SQL
spark2-sql \
    --master yarn \
    --conf spark.ui.port=0 \
    --conf spark.sql.warehouse.dir=/user/${USER}/warehouse
Using Scala
spark2-shell \
    --master yarn \
    --conf spark.ui.port=0 \
    --conf spark.sql.warehouse.dir=/user/${USER}/warehouse
Using PySpark
pyspark2 \
    --master yarn \
    --conf spark.ui.port=0 \
    --conf spark.sql.warehouse.dir=/user/${USER}/warehouse
employees = [(1, "Scott", "Tiger", 1000.0,
              "united states", "+1 123 456 7890", "123 45 6789"
             ),
             (2, "Henry", "Ford", 1250.0,
              "India", "+91 234 567 8901", "456 78 9123"
             ),
             (3, "Nick", "Junior", 750.0,
              "united KINGDOM", "+44 111 111 1111", "222 33 4444"
             ),
             (4, "Bill", "Gomes", 1500.0,
              "AUSTRALIA", "+61 987 654 3210", "789 12 6118"
             )
            ]
employeesDF = spark. \
    createDataFrame(employees,
                    schema="""employee_id INT, first_name STRING,
                    last_name STRING, salary FLOAT, nationality STRING,
                    phone_number STRING, ssn STRING"""
                   )
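As a quick sanity check (output omitted here), we can confirm that the schema took effect:

employeesDF.printSchema()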
For Data Frame APIs such as select, groupBy, orderBy etc., we can pass column names as strings.
employeesDF. \
    select("first_name", "last_name"). \
    show()
+----------+---------+
|first_name|last_name|
+----------+---------+
| Scott| Tiger|
| Henry| Ford|
| Nick| Junior|
| Bill| Gomes|
+----------+---------+
employeesDF. \
    groupBy("nationality"). \
    count(). \
    show()
+--------------+-----+
| nationality|count|
+--------------+-----+
| India| 1|
|united KINGDOM| 1|
| united states| 1|
| AUSTRALIA| 1|
+--------------+-----+
employeesDF. \
    orderBy("employee_id"). \
    show()
+-----------+----------+---------+------+--------------+----------------+-----------+
|employee_id|first_name|last_name|salary| nationality| phone_number| ssn|
+-----------+----------+---------+------+--------------+----------------+-----------+
| 1| Scott| Tiger|1000.0| united states| +1 123 456 7890|123 45 6789|
| 2| Henry| Ford|1250.0| India|+91 234 567 8901|456 78 9123|
| 3| Nick| Junior| 750.0|united KINGDOM|+44 111 111 1111|222 33 4444|
| 4| Bill| Gomes|1500.0| AUSTRALIA|+61 987 654 3210|789 12 6118|
+-----------+----------+---------+------+--------------+----------------+-----------+
As long as we are not applying any transformation on a column, we can pass its name as a string. If we want to apply transformations using functions such as upper, however, passing column names as strings will not suffice; we have to pass them as Column type, typically by using the col function.
from pyspark.sql.functions import col

employeesDF. \
    select(col("first_name"), col("last_name")). \
    show()
+----------+---------+
|first_name|last_name|
+----------+---------+
| Scott| Tiger|
| Henry| Ford|
| Nick| Junior|
| Bill| Gomes|
+----------+---------+
from pyspark.sql.functions import upper

# This will fail as upper expects a Column, not a column name passed as a string
employeesDF. \
    select(upper("first_name"), upper("last_name")). \
    show()
---------------------------------------------------------------------------
Py4JError Traceback (most recent call last)
<ipython-input-10-2f0306871c54> in <module>
1 employeesDF. \
----> 2 select(upper("first_name"), upper("last_name")). \
3 show()
/usr/hdp/current/spark2-client/python/pyspark/sql/functions.py in _(col)
40 def _(col):
41 sc = SparkContext._active_spark_context
---> 42 jc = getattr(sc._jvm.functions, name)(col._jc if isinstance(col, Column) else col)
43 return Column(jc)
44 _.__name__ = name
/usr/hdp/current/spark2-client/python/lib/py4j-0.10.6-src.zip/py4j/java_gateway.py in __call__(self, *args)
1158 answer = self.gateway_client.send_command(command)
1159 return_value = get_return_value(
-> 1160 answer, self.gateway_client, self.target_id, self.name)
1161
1162 for temp_arg in temp_args:
/usr/hdp/current/spark2-client/python/pyspark/sql/utils.py in deco(*a, **kw)
61 def deco(*a, **kw):
62 try:
---> 63 return f(*a, **kw)
64 except py4j.protocol.Py4JJavaError as e:
65 s = e.java_exception.toString()
/usr/hdp/current/spark2-client/python/lib/py4j-0.10.6-src.zip/py4j/protocol.py in get_return_value(answer, gateway_client, target_id, name)
322 raise Py4JError(
323 "An error occurred while calling {0}{1}{2}. Trace:\n{3}\n".
--> 324 format(target_id, ".", name, value))
325 else:
326 raise Py4JError(
Py4JError: An error occurred while calling z:org.apache.spark.sql.functions.upper. Trace:
py4j.Py4JException: Method upper([class java.lang.String]) does not exist
at py4j.reflection.ReflectionEngine.getMethod(ReflectionEngine.java:318)
at py4j.reflection.ReflectionEngine.getMethod(ReflectionEngine.java:339)
at py4j.Gateway.invoke(Gateway.java:276)
at py4j.commands.AbstractCommand.invokeMethod(AbstractCommand.java:132)
at py4j.commands.CallCommand.execute(CallCommand.java:79)
at py4j.GatewayConnection.run(GatewayConnection.java:214)
at java.lang.Thread.run(Thread.java:748)
col is the function that converts a column name from string type to Column type. We can also refer to columns as Column type using the Data Frame itself.
from pyspark.sql.functions import col, upper

employeesDF. \
    select(upper(col("first_name")), upper(col("last_name"))). \
    show()
+-----------------+----------------+
|upper(first_name)|upper(last_name)|
+-----------------+----------------+
| SCOTT| TIGER|
| HENRY| FORD|
| NICK| JUNIOR|
| BILL| GOMES|
+-----------------+----------------+
employeesDF. \
    groupBy(upper(col("nationality"))). \
    count(). \
    show()
+------------------+-----+
|upper(nationality)|count|
+------------------+-----+
| UNITED KINGDOM| 1|
| INDIA| 1|
| AUSTRALIA| 1|
| UNITED STATES| 1|
+------------------+-----+
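The generated column names such as upper(nationality) are not very readable. As a minimal sketch (output omitted), we can rename the derived column using alias, which is covered next:

employeesDF. \
    groupBy(upper(col("nationality")).alias("nationality")). \
    count(). \
    show()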
Also, if we want to use functions such as alias, desc etc. on columns, then we have to pass the column names as Column type (not as strings).
# This will fail as the function desc is available only on Column type
employeesDF. \
    orderBy("employee_id".desc()). \
    show()
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-14-fd95955a04d4> in <module>
1 # This will fail as the function desc is available only on column type.
2 employeesDF. \
----> 3 orderBy("employee_id".desc()). \
4 show()
AttributeError: 'str' object has no attribute 'desc'
# We can invoke desc on columns which are of type Column
employeesDF. \
    orderBy(col("employee_id").desc()). \
    show()
+-----------+----------+---------+------+--------------+----------------+-----------+
|employee_id|first_name|last_name|salary| nationality| phone_number| ssn|
+-----------+----------+---------+------+--------------+----------------+-----------+
| 4| Bill| Gomes|1500.0| AUSTRALIA|+61 987 654 3210|789 12 6118|
| 3| Nick| Junior| 750.0|united KINGDOM|+44 111 111 1111|222 33 4444|
| 2| Henry| Ford|1250.0| India|+91 234 567 8901|456 78 9123|
| 1| Scott| Tiger|1000.0| united states| +1 123 456 7890|123 45 6789|
+-----------+----------+---------+------+--------------+----------------+-----------+
employeesDF. \
    orderBy(col("first_name").desc()). \
    show()
+-----------+----------+---------+------+--------------+----------------+-----------+
|employee_id|first_name|last_name|salary| nationality| phone_number| ssn|
+-----------+----------+---------+------+--------------+----------------+-----------+
| 1| Scott| Tiger|1000.0| united states| +1 123 456 7890|123 45 6789|
| 3| Nick| Junior| 750.0|united KINGDOM|+44 111 111 1111|222 33 4444|
| 2| Henry| Ford|1250.0| India|+91 234 567 8901|456 78 9123|
| 4| Bill| Gomes|1500.0| AUSTRALIA|+61 987 654 3210|789 12 6118|
+-----------+----------+---------+------+--------------+----------------+-----------+
# Alternative - we can also refer to column names using the Data Frame like this
employeesDF. \
    orderBy(upper(employeesDF['first_name']).alias('first_name')). \
    show()
+-----------+----------+---------+------+--------------+----------------+-----------+
|employee_id|first_name|last_name|salary| nationality| phone_number| ssn|
+-----------+----------+---------+------+--------------+----------------+-----------+
| 4| Bill| Gomes|1500.0| AUSTRALIA|+61 987 654 3210|789 12 6118|
| 2| Henry| Ford|1250.0| India|+91 234 567 8901|456 78 9123|
| 3| Nick| Junior| 750.0|united KINGDOM|+44 111 111 1111|222 33 4444|
| 1| Scott| Tiger|1000.0| united states| +1 123 456 7890|123 45 6789|
+-----------+----------+---------+------+--------------+----------------+-----------+
# Alternative - the same, using dot notation on the Data Frame
employeesDF. \
    orderBy(upper(employeesDF.first_name).alias('first_name')). \
    show()
+-----------+----------+---------+------+--------------+----------------+-----------+
|employee_id|first_name|last_name|salary| nationality| phone_number| ssn|
+-----------+----------+---------+------+--------------+----------------+-----------+
| 4| Bill| Gomes|1500.0| AUSTRALIA|+61 987 654 3210|789 12 6118|
| 2| Henry| Ford|1250.0| India|+91 234 567 8901|456 78 9123|
| 3| Nick| Junior| 750.0|united KINGDOM|+44 111 111 1111|222 33 4444|
| 1| Scott| Tiger|1000.0| united states| +1 123 456 7890|123 45 6789|
+-----------+----------+---------+------+--------------+----------------+-----------+
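These Column functions can also be chained. Here is a minimal sketch (output omitted) that sorts by the uppercased first name in descending order:

employeesDF. \
    orderBy(upper(col("first_name")).desc()). \
    show()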
Sometimes, we want to add a literal to the column values. For example, we might want to concatenate first_name and last_name separated by a comma and a space.
from pyspark.sql.functions import concat

# This will fail - the string ", " is treated as a column name
employeesDF. \
    select(concat(col("first_name"), ", ", col("last_name"))). \
    show()
---------------------------------------------------------------------------
Py4JJavaError Traceback (most recent call last)
/usr/hdp/current/spark2-client/python/pyspark/sql/utils.py in deco(*a, **kw)
62 try:
---> 63 return f(*a, **kw)
64 except py4j.protocol.Py4JJavaError as e:
/usr/hdp/current/spark2-client/python/lib/py4j-0.10.6-src.zip/py4j/protocol.py in get_return_value(answer, gateway_client, target_id, name)
319 "An error occurred while calling {0}{1}{2}.\n".
--> 320 format(target_id, ".", name), value)
321 else:
Py4JJavaError: An error occurred while calling o80.select.
: org.apache.spark.sql.AnalysisException: cannot resolve '`, `' given input columns: [nationality, last_name, employee_id, phone_number, ssn, first_name, salary];;
'Project [concat(first_name#1, ', , last_name#2) AS concat(first_name, , , last_name)#212]
+- AnalysisBarrier
+- LogicalRDD [employee_id#0, first_name#1, last_name#2, salary#3, nationality#4, phone_number#5, ssn#6], false
at org.apache.spark.sql.catalyst.analysis.package$AnalysisErrorAt.failAnalysis(package.scala:42)
at org.apache.spark.sql.catalyst.analysis.CheckAnalysis$$anonfun$checkAnalysis$1$$anonfun$apply$2.applyOrElse(CheckAnalysis.scala:88)
at org.apache.spark.sql.catalyst.analysis.CheckAnalysis$$anonfun$checkAnalysis$1$$anonfun$apply$2.applyOrElse(CheckAnalysis.scala:85)
at org.apache.spark.sql.catalyst.trees.TreeNode$$anonfun$transformUp$1.apply(TreeNode.scala:289)
at org.apache.spark.sql.catalyst.trees.TreeNode$$anonfun$transformUp$1.apply(TreeNode.scala:289)
at org.apache.spark.sql.catalyst.trees.CurrentOrigin$.withOrigin(TreeNode.scala:70)
at org.apache.spark.sql.catalyst.trees.TreeNode.transformUp(TreeNode.scala:288)
at org.apache.spark.sql.catalyst.trees.TreeNode$$anonfun$3.apply(TreeNode.scala:286)
at org.apache.spark.sql.catalyst.trees.TreeNode$$anonfun$3.apply(TreeNode.scala:286)
at org.apache.spark.sql.catalyst.trees.TreeNode$$anonfun$4$$anonfun$apply$11.apply(TreeNode.scala:335)
at scala.collection.TraversableLike$$anonfun$map$1.apply(TraversableLike.scala:234)
at scala.collection.TraversableLike$$anonfun$map$1.apply(TraversableLike.scala:234)
at scala.collection.mutable.ResizableArray$class.foreach(ResizableArray.scala:59)
at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:48)
at scala.collection.TraversableLike$class.map(TraversableLike.scala:234)
at scala.collection.AbstractTraversable.map(Traversable.scala:104)
at org.apache.spark.sql.catalyst.trees.TreeNode$$anonfun$4.apply(TreeNode.scala:333)
at org.apache.spark.sql.catalyst.trees.TreeNode.mapProductIterator(TreeNode.scala:187)
at org.apache.spark.sql.catalyst.trees.TreeNode.mapChildren(TreeNode.scala:304)
at org.apache.spark.sql.catalyst.trees.TreeNode.transformUp(TreeNode.scala:286)
at org.apache.spark.sql.catalyst.trees.TreeNode$$anonfun$3.apply(TreeNode.scala:286)
at org.apache.spark.sql.catalyst.trees.TreeNode$$anonfun$3.apply(TreeNode.scala:286)
at org.apache.spark.sql.catalyst.trees.TreeNode$$anonfun$4.apply(TreeNode.scala:306)
at org.apache.spark.sql.catalyst.trees.TreeNode.mapProductIterator(TreeNode.scala:187)
at org.apache.spark.sql.catalyst.trees.TreeNode.mapChildren(TreeNode.scala:304)
at org.apache.spark.sql.catalyst.trees.TreeNode.transformUp(TreeNode.scala:286)
at org.apache.spark.sql.catalyst.plans.QueryPlan$$anonfun$transformExpressionsUp$1.apply(QueryPlan.scala:95)
at org.apache.spark.sql.catalyst.plans.QueryPlan$$anonfun$transformExpressionsUp$1.apply(QueryPlan.scala:95)
at org.apache.spark.sql.catalyst.plans.QueryPlan$$anonfun$1.apply(QueryPlan.scala:107)
at org.apache.spark.sql.catalyst.plans.QueryPlan$$anonfun$1.apply(QueryPlan.scala:107)
at org.apache.spark.sql.catalyst.trees.CurrentOrigin$.withOrigin(TreeNode.scala:70)
at org.apache.spark.sql.catalyst.plans.QueryPlan.transformExpression$1(QueryPlan.scala:106)
at org.apache.spark.sql.catalyst.plans.QueryPlan.org$apache$spark$sql$catalyst$plans$QueryPlan$$recursiveTransform$1(QueryPlan.scala:118)
at org.apache.spark.sql.catalyst.plans.QueryPlan$$anonfun$org$apache$spark$sql$catalyst$plans$QueryPlan$$recursiveTransform$1$1.apply(QueryPlan.scala:122)
at scala.collection.TraversableLike$$anonfun$map$1.apply(TraversableLike.scala:234)
at scala.collection.TraversableLike$$anonfun$map$1.apply(TraversableLike.scala:234)
at scala.collection.mutable.ResizableArray$class.foreach(ResizableArray.scala:59)
at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:48)
at scala.collection.TraversableLike$class.map(TraversableLike.scala:234)
at scala.collection.AbstractTraversable.map(Traversable.scala:104)
at org.apache.spark.sql.catalyst.plans.QueryPlan.org$apache$spark$sql$catalyst$plans$QueryPlan$$recursiveTransform$1(QueryPlan.scala:122)
at org.apache.spark.sql.catalyst.plans.QueryPlan$$anonfun$2.apply(QueryPlan.scala:127)
at org.apache.spark.sql.catalyst.trees.TreeNode.mapProductIterator(TreeNode.scala:187)
at org.apache.spark.sql.catalyst.plans.QueryPlan.mapExpressions(QueryPlan.scala:127)
at org.apache.spark.sql.catalyst.plans.QueryPlan.transformExpressionsUp(QueryPlan.scala:95)
at org.apache.spark.sql.catalyst.analysis.CheckAnalysis$$anonfun$checkAnalysis$1.apply(CheckAnalysis.scala:85)
at org.apache.spark.sql.catalyst.analysis.CheckAnalysis$$anonfun$checkAnalysis$1.apply(CheckAnalysis.scala:80)
at org.apache.spark.sql.catalyst.trees.TreeNode.foreachUp(TreeNode.scala:127)
at org.apache.spark.sql.catalyst.analysis.CheckAnalysis$class.checkAnalysis(CheckAnalysis.scala:80)
at org.apache.spark.sql.catalyst.analysis.Analyzer.checkAnalysis(Analyzer.scala:92)
at org.apache.spark.sql.catalyst.analysis.Analyzer.executeAndCheck(Analyzer.scala:105)
at org.apache.spark.sql.execution.QueryExecution.analyzed$lzycompute(QueryExecution.scala:57)
at org.apache.spark.sql.execution.QueryExecution.analyzed(QueryExecution.scala:55)
at org.apache.spark.sql.execution.QueryExecution.assertAnalyzed(QueryExecution.scala:47)
at org.apache.spark.sql.Dataset$.ofRows(Dataset.scala:74)
at org.apache.spark.sql.Dataset.org$apache$spark$sql$Dataset$$withPlan(Dataset.scala:3295)
at org.apache.spark.sql.Dataset.select(Dataset.scala:1307)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:498)
at py4j.reflection.MethodInvoker.invoke(MethodInvoker.java:244)
at py4j.reflection.ReflectionEngine.invoke(ReflectionEngine.java:357)
at py4j.Gateway.invoke(Gateway.java:282)
at py4j.commands.AbstractCommand.invokeMethod(AbstractCommand.java:132)
at py4j.commands.CallCommand.execute(CallCommand.java:79)
at py4j.GatewayConnection.run(GatewayConnection.java:214)
at java.lang.Thread.run(Thread.java:748)
During handling of the above exception, another exception occurred:
AnalysisException Traceback (most recent call last)
<ipython-input-20-a50db305857c> in <module>
1 # Same as above
2 employeesDF. \
----> 3 select(concat(col("first_name"), ", ", col("last_name"))). \
4 show()
/usr/hdp/current/spark2-client/python/pyspark/sql/dataframe.py in select(self, *cols)
1200 [Row(name=u'Alice', age=12), Row(name=u'Bob', age=15)]
1201 """
-> 1202 jdf = self._jdf.select(self._jcols(*cols))
1203 return DataFrame(jdf, self.sql_ctx)
1204
/usr/hdp/current/spark2-client/python/lib/py4j-0.10.6-src.zip/py4j/java_gateway.py in __call__(self, *args)
1158 answer = self.gateway_client.send_command(command)
1159 return_value = get_return_value(
-> 1160 answer, self.gateway_client, self.target_id, self.name)
1161
1162 for temp_arg in temp_args:
/usr/hdp/current/spark2-client/python/pyspark/sql/utils.py in deco(*a, **kw)
67 e.java_exception.getStackTrace()))
68 if s.startswith('org.apache.spark.sql.AnalysisException: '):
---> 69 raise AnalysisException(s.split(': ', 1)[1], stackTrace)
70 if s.startswith('org.apache.spark.sql.catalyst.analysis'):
71 raise AnalysisException(s.split(': ', 1)[1], stackTrace)
AnalysisException: "cannot resolve '`, `' given input columns: [nationality, last_name, employee_id, phone_number, ssn, first_name, salary];;\n'Project [concat(first_name#1, ', , last_name#2) AS concat(first_name, , , last_name)#212]\n+- AnalysisBarrier\n +- LogicalRDD [employee_id#0, first_name#1, last_name#2, salary#3, nationality#4, phone_number#5, ssn#6], false\n"
# Referring to columns using the Data Frame - fails for the same reason
employeesDF. \
    select(concat(employeesDF["first_name"], ", ", employeesDF["last_name"])). \
    show()
AnalysisException: "cannot resolve '`, `' given input columns: [nationality, last_name, employee_id, phone_number, ssn, first_name, salary];;\n'Project [concat(first_name#1, ', , last_name#2) AS concat(first_name, , , last_name)#214]\n+- AnalysisBarrier\n +- LogicalRDD [employee_id#0, first_name#1, last_name#2, salary#3, nationality#4, phone_number#5, ssn#6], false\n"
If we pass literals directly as strings or numeric values, it will fail: Spark searches for a column whose name matches the string passed. In this example, it looks for a column named ", " (comma followed by space). We have to convert literals to Column type by using the lit function.
from pyspark.sql.functions import concat, col, lit

employeesDF. \
    select(concat(col("first_name"),
                  lit(", "),
                  col("last_name")
                 ).alias("full_name")
          ). \
    show(truncate=False)
+------------+
|full_name |
+------------+
|Scott, Tiger|
|Henry, Ford |
|Nick, Junior|
|Bill, Gomes |
+------------+
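lit is not limited to strings; it works the same way for numeric literals. Here is a minimal sketch (the bonus column name is illustrative, output omitted) that derives a new column from salary:

from pyspark.sql.functions import col, lit

employeesDF. \
    withColumn("bonus", col("salary") * lit(0.1)). \
    select("employee_id", "salary", "bonus"). \
    show()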