# Formatting
```python
block_size = 128  # or any number suitable to your context

def group_texts(examples):
    # Concatenate all 'input_ids'
    concatenated_examples = sum(examples["input_ids"], [])
    total_length = len(concatenated_examples)
    # Organize into sequences of fixed length
    sequences = [
        concatenated_examples[i : i + block_size]
        for i in range(0, total_length, block_size)
    ]
    result = {
        "input_ids": sequences,
        # Shift the labels for CLM
        "labels": [sequence[1:] + [tokenizer.eos_token_id] for sequence in sequences],
    }
    return result

tokenized_dataset = tokenized_dataset.map(
    group_texts,
    batched=True,
    batch_size=1000,  # or any number suitable to your context
)
```
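
For reference, here is a tiny standalone sketch of what the grouping step produces, using made-up token ids and a toy block size of 4 (none of these values come from my real setup):

```python
# Toy illustration only: made-up token ids, block size of 4 just for this sketch
toy_block_size = 4
toy_examples = {"input_ids": [[1, 2, 3], [4, 5, 6, 7, 8], [9, 10]]}

# Concatenate all lists of token ids into one long list
concatenated = sum(toy_examples["input_ids"], [])  # [1, 2, 3, ..., 10]

# Cut the long list into fixed-length chunks (the last one may be shorter)
chunks = [
    concatenated[i : i + toy_block_size]
    for i in range(0, len(concatenated), toy_block_size)
]
print(chunks)  # [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10]]
```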
I am not getting what `block_size` and `batch_size` refer to here. Can anyone clarify what each one controls?