Read large CSV files quickly with a Dask DataFrame
import dask.dataframe as dd

# Column names to assign to the DataFrame.
n = ["column1", "column2", "column3", "column4"]

# assume_missing=True reads integer columns that aren't given an explicit
# dtype as floats, so missing values don't cause dtype errors across partitions.
df = dd.read_csv('D:/BigData/data1.csv', assume_missing=True, names=n)

print(df.head())
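If the file is very large, it can also help to control how Dask partitions it and to be aware that Dask is lazy: head() only reads the first partition, and a full computation happens only when you call compute(). Below is a minimal sketch under the same assumed file path and column names, using the blocksize parameter of dd.read_csv to set the partition size.

import dask.dataframe as dd

# Same hypothetical path and column names as in the answer above.
cols = ["column1", "column2", "column3", "column4"]

# blocksize controls how the CSV is split into partitions;
# smaller blocks mean more, smaller tasks.
df = dd.read_csv(
    "D:/BigData/data1.csv",
    names=cols,
    assume_missing=True,
    blocksize="64MB",
)

print(df.npartitions)                   # number of partitions Dask created
print(df.head())                        # only reads the first partition
print(df["column1"].mean().compute())   # the full pass over the data happens here

Tune blocksize to your available memory: each worker needs to hold a few partitions in memory at once, so something in the tens to hundreds of megabytes per partition is a common starting point.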