The questioner must have solved the problem already; for others who might land on this question, my answer might help.
Try this solution (adapt it to your dataset). I tried it on 50–80 GB of data and it worked; adding NumPy will improve the performance.
import pandas as pd
from datetime import date
from datetime import datetime
print("1 Load rec =", datetime.now())
df6 = pd.read_csv('sampleDataframeFlux/sampleDataframeFlux.csv',low_memory=False, memory_map=True,engine='c',na_filter=False,index_col=False,usecols=["time", "Label","Server","value"])
print("shape",df6.shape)
print("2 Create dataframe =",datetime.now())
df6["Label"]=df6["Server"]+"|"+df6["Label"]
df6.drop(['Server'],axis=1,inplace=True)
print("3 Time trim =", datetime.now())
df6['time']=df6['time']//1000000000
print("shape",df6.shape)
print("4 Round Epoch to nearest multiple of 5 =", datetime.now())
df6['time']=5*round(df6['time']/5)
print("shape",df6.shape)
print("5 Pivot dataframe=", datetime.now())
df6=df6.pivot_table(index='time', columns=["Label"],values="value",fill_value=0)
print("shape",df6.shape)
print("6 Epoch to UTC =", datetime.now())
df6.index=pd.to_datetime(df6.index, unit='s')
print("7 Convert to type category to reduce memory =", datetime.now())
df6=df6.astype('category')
print("shape",df6.shape)
print("8 Start to write to a file =", datetime.now())
df6.to_csv('file_11.csv', header=True, chunksize=500000)
print("9 Finish =", datetime.now())