import pyarrow.parquet as pq
from .utilities import create_file_object, df_generator, cast_pandas
from ._base_copy import BaseCopy
from typing import Optional


class ParquetCopy(BaseCopy):
    """
    Class for handling the standard case of reading a Parquet file into a Pandas
    DataFrame, iterating over it in chunks, and COPYing it to PostgreSQL via an
    in-memory StringIO CSV
    """
    def __init__(
        self,
        file_name: str,
        defer_sql_objs: bool = False,
        conn=None,
        table_obj=None,
        sql_table: Optional[str] = None,
        schema: Optional[str] = None,
        csv_chunksize: int = 10**6,
        parquet_chunksize: int = 10**7,
    ):
        super().__init__(defer_sql_objs, conn, table_obj, sql_table, csv_chunksize)

        self.parquet_file = pq.ParquetFile(file_name)
        self.parquet_chunksize = parquet_chunksize
        self.total_rows = self.parquet_file.metadata.num_rows

        self.logger.info(f"*** {file_name} ***")

        # Ceiling division: a partial final batch still counts as its own chunk
        if self.total_rows > self.parquet_chunksize:
            if self.total_rows % self.parquet_chunksize:
                self.total_chunks = (self.total_rows // self.parquet_chunksize) + 1
            else:
                self.total_chunks = self.total_rows // self.parquet_chunksize

            self.big_copy = True
        else:
            self.total_chunks = 1
            self.big_copy = False

    def copy(self, data_formatters=[cast_pandas], data_formatter_kwargs={}):
        """
        Go through the sequence to COPY data to the PostgreSQL table: drop primary
        and foreign keys to optimize speed, TRUNCATE the table, COPY the data,
        recreate the keys, and run ANALYZE.

        Parameters
        ----------
        data_formatters: list of functions to apply to the df during the sequence.
            Note that each of these functions should be able to handle kwargs for
            one another
        data_formatter_kwargs: dict of kwargs to pass to the data_formatters
            functions
        """
        self.drop_fks()
        self.drop_pk()

        # TRUNCATE and COPY must share one transaction for COPY FREEZE to apply
        with self.conn.begin():
            self.truncate()
            if self.big_copy:
                self.big_parquet_to_pg(
                    data_formatters=data_formatters,
                    data_formatter_kwargs=data_formatter_kwargs,
                )
            else:
                self.parquet_to_pg(
                    data_formatters=data_formatters,
                    data_formatter_kwargs=data_formatter_kwargs,
                )

        self.create_pk()
        self.create_fks()
        self.analyze()

    def parquet_to_pg(self, data_formatters=[cast_pandas], data_formatter_kwargs={}):
        """Read the entire Parquet file into memory and COPY it in CSV chunks."""
        self.logger.info("Reading Parquet file")
        df = self.parquet_file.read().to_pandas()

        self.logger.info("Formatting data")
        df = self.data_formatting(
            df, functions=data_formatters, **data_formatter_kwargs
        )

        self.logger.info("Creating generator for chunking dataframe")
        for chunk in df_generator(df, self.csv_chunksize, logger=self.logger):
            self.logger.info("Creating CSV in memory")
            fo = create_file_object(chunk)

            self.logger.info("Copying chunk to database")
            self.copy_from_file(fo)
            del fo
        del df
        self.logger.info(f"All chunks copied ({self.total_rows:,} rows)")

    def big_parquet_to_pg(
        self, data_formatters=[cast_pandas], data_formatter_kwargs={}
    ):
        """Stream the Parquet file in record batches and COPY each in CSV chunks."""
        copied_rows = 0
        n_chunk = 0
        for batch in self.parquet_file.iter_batches(batch_size=self.parquet_chunksize):
            n_chunk += 1
            self.logger.info(f"*** Parquet chunk {n_chunk} of {self.total_chunks} ***")
            df = batch.to_pandas()
            batch_rows = df.shape[0]

            self.logger.info("Formatting data")
            df = self.data_formatting(
                df, functions=data_formatters, **data_formatter_kwargs
            )

            self.logger.info("Creating generator for chunking dataframe")
            for chunk in df_generator(df, self.csv_chunksize, logger=self.logger):
                self.logger.info("Creating CSV in memory")
                fo = create_file_object(chunk)

                self.logger.info("Copying chunk to database")
                self.copy_from_file(fo)
                del fo
            del df

            copied_rows += batch_rows

            self.logger.info(f"Copied {copied_rows:,} of {self.total_rows:,} rows")

        self.logger.info(f"All chunks copied ({self.total_rows:,} rows)")
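

# A minimal usage sketch, kept out of import-time execution: it shows one way to
# drive ParquetCopy end to end, assuming a SQLAlchemy engine and a Table object
# reflected from an existing PostgreSQL table. The connection URL, table name,
# and file path are hypothetical placeholders, not part of this module's API.
# Runnable via `python -m <package>.<this_module>` so the relative imports above
# still resolve.
if __name__ == "__main__":
    from sqlalchemy import MetaData, create_engine

    engine = create_engine("postgresql://user:password@localhost:5432/mydb")
    metadata = MetaData()
    # Reflect the pre-existing target table so ParquetCopy can manage its keys
    metadata.reflect(bind=engine, only=["my_table"])

    with engine.connect() as conn:
        copier = ParquetCopy(
            "/path/to/data.parquet",
            conn=conn,
            table_obj=metadata.tables["my_table"],
            sql_table="my_table",
        )
        # copy() handles key drops/recreation, TRUNCATE, COPY, and ANALYZE
        copier.copy()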