unix_timestamp only supports second precision. Looking at your values, they are epoch milliseconds: the first 10 digits are the epoch seconds and the last 3 digits are the millisecond part.
from pyspark.sql.functions import substring, col, to_timestamp, concat, lit, from_unixtime
df = spark.createDataFrame([('1578569683753',), ('1578569581793',),('1578569581993',)], ['TMS'])
df.show(3,False)
df.printSchema()
Result
+-------------+
|TMS |
+-------------+
|1578569683753|
|1578569581793|
|1578569581993|
+-------------+
root
|-- TMS: string (nullable = true)
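To see why the 13-digit value can't go through from_unixtime as-is (a quick illustration, not part of the original answer; the WRONG_INTERPRETATION alias is just for demonstration): treating milliseconds as seconds puts the date tens of thousands of years into the future.
df.select(
    # interpreting epoch *milliseconds* as *seconds* overshoots by a factor of 1000
    from_unixtime(col("TMS").cast("long")).alias("WRONG_INTERPRETATION")
).show(3, False)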
Convert to a human-readable timestamp format
df1 = (df
    .select(
        "TMS"
        # first 10 digits are the epoch seconds
        , from_unixtime(substring(col("TMS"), 1, 10), format="yyyy-MM-dd HH:mm:ss").alias("TMS_WITHOUT_MILLISECONDS")
        # last 3 digits are the milliseconds
        , substring(col("TMS"), 11, 3).alias("MILLISECONDS")
        # stitch seconds and milliseconds back together as a string
        , concat(from_unixtime(substring(col("TMS"), 1, 10), format="yyyy-MM-dd HH:mm:ss"), lit('.'), substring(col("TMS"), 11, 3)).alias("TMS_StringType")
        # and parse that string into a real TimestampType column
        , to_timestamp(concat(from_unixtime(substring(col("TMS"), 1, 10), format="yyyy-MM-dd HH:mm:ss"), lit('.'), substring(col("TMS"), 11, 3))).alias("TMS_TimestampType")
    )
)
df1.show(3,False)
df1.printSchema()
Output
+-------------+------------------------+------------+-----------------------+-----------------------+
|TMS          |TMS_WITHOUT_MILLISECONDS|MILLISECONDS|TMS_StringType         |TMS_TimestampType      |
+-------------+------------------------+------------+-----------------------+-----------------------+
|1578569683753|2020-01-09 11:34:43     |753         |2020-01-09 11:34:43.753|2020-01-09 11:34:43.753|
|1578569581793|2020-01-09 11:33:01     |793         |2020-01-09 11:33:01.793|2020-01-09 11:33:01.793|
|1578569581993|2020-01-09 11:33:01     |993         |2020-01-09 11:33:01.993|2020-01-09 11:33:01.993|
+-------------+------------------------+------------+-----------------------+-----------------------+
root
|-- TMS: string (nullable = true)
|-- TMS_WITHOUT_MILLISECONDS: string (nullable = true)
|-- MILLISECONDS: string (nullable = true)
|-- TMS_StringType: string (nullable = true)
|-- TMS_TimestampType: timestamp (nullable = true)
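If you only need the TimestampType column, a shorter alternative (a minimal sketch, not part of the answer above: it relies on Spark interpreting a numeric value cast to timestamp as seconds since the epoch, so the fractional part carries the milliseconds through) is to divide by 1000 and cast:
df2 = df.withColumn(
    "TMS_TimestampType",
    # epoch milliseconds / 1000 = fractional epoch seconds;
    # casting the double to timestamp keeps the millisecond part
    (col("TMS").cast("double") / 1000).cast("timestamp")
)
df2.show(3, False)
df2.printSchema()
This avoids the string round-trip entirely.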