I need to insert a very large data set into a table for testing purposes, so I've written a script that generates purely random data. It works, but it runs so slowly that it would take a month of Sundays to complete.
A few details:
- The source table has 150 million records
- statistics_per_source is 3
- sleep is 5 seconds (these settings come from a Constants table, sketched below)
- It currently takes about 4 minutes per 1,000 inserts
- PostgreSQL 12
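The script pulls its settings from a single-row Constants table, roughly like this; the commit_chunk_size value shown here is only an example, not the real figure:

CREATE TABLE Constants (
    sleep                 integer,  -- seconds to pause between chunks
    statistics_per_source integer,  -- statistic rows generated per source row
    commit_chunk_size     integer   -- source rows processed per commit
);

INSERT INTO Constants VALUES (5, 3, 1000);  -- 1000 is a placeholder chunk size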
Question: What steps can I take to make the script below run much faster than it currently does, or what alternative approach should I take to insert this volume of random data?
TRUNCATE TABLE public.statistic RESTART IDENTITY;

SELECT 'Creating View Statistics for Sources' as progress;

DO $$
DECLARE
    sleep_secs integer;       -- pause between chunks, from Constants.sleep
    sps        integer;       -- statistic rows to generate per source row
    start      integer := 1;  -- id of the last source row already processed
    increment  integer;       -- chunk size, from Constants.commit_chunk_size
    remaining  integer;       -- source rows left beyond the current position
BEGIN
    SELECT sleep                 INTO sleep_secs FROM Constants;
    SELECT statistics_per_source INTO sps        FROM Constants;
    SELECT commit_chunk_size     INTO increment  FROM Constants;

    INSERT INTO Progress(dt, operation, progress)
    VALUES (now(), 'statistics from source', 'BEGIN INSERT');

    LOOP
        -- Stop once there are no source rows past the current position.
        SELECT count(*) INTO remaining FROM source WHERE id > start;
        EXIT WHEN remaining = 0;

        INSERT INTO Progress(dt, operation, progress)
        VALUES (now(), 'statistics from source', 'Beginning Source=' || start);

        -- Generate sps random statistic rows for every source row in this chunk.
        INSERT INTO statistic(created, value, classtype, source_id, source_created, brand)
        SELECT
            date(src.created + trunc(random() * 20) * '1 day'::interval),
            (random() * 100000)::int,
            CASE WHEN random() > 0.5 THEN 'Views' ELSE 'CTR' END,
            src.id,
            src.created,
            NULL
        FROM source src
        CROSS JOIN (SELECT generate_series(1, sps) AS value) s
        WHERE src.id BETWEEN start + 1 AND start + increment;

        INSERT INTO Progress(dt, operation, progress)
        VALUES (now(), 'statistics from source', 'Committing source=' || start);

        COMMIT;
        PERFORM pg_sleep(sleep_secs);

        start := start + increment;
    END LOOP;
END $$;
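For clarity, the CROSS JOIN against generate_series(1, sps) is what multiplies each source row into sps statistic rows; in isolation the pattern looks like this (throwaway names, just to illustrate):

-- Each row of t is repeated once per generated value of n.
SELECT t.id, s.n
FROM (VALUES (1), (2)) AS t(id)
CROSS JOIN generate_series(1, 3) AS s(n);
-- 2 rows x 3 series values = 6 result rows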
The target table looks like this; I've intentionally avoided creating indexes for now, for the sake of insert performance (an example of the kind of index I'd add afterwards follows the table definition).
CREATE TABLE public.statistic
(
    id bigint NOT NULL GENERATED ALWAYS AS IDENTITY (INCREMENT 1 START 1 MINVALUE 1 MAXVALUE 9223372036854775807 CACHE 1),
    created date NOT NULL,
    value double precision NOT NULL,
    classtype text COLLATE pg_catalog."default",
    data_source integer,
    production integer,
    source_id bigint,
    source_created date,
    brand integer,
    CONSTRAINT statistics_pk PRIMARY KEY (id)
)
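Once the data is loaded I'd create indexes afterwards, along these lines (the column choice here is only an illustration, not settled yet):

-- Example only: index added after the bulk load rather than before it.
CREATE INDEX statistic_source_id_idx ON public.statistic (source_id);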