I'm trying out PyTorch DDP with torchrun, but the script always crashes at the line marked with the first # FIXME. The script trains a text classifier on the IMDB dataset.
Code:
# launch command (newer torchrun style): CUDA_LAUNCH_BLOCKING=1 torchrun --standalone --nnodes=1 --nproc-per-node=4 learn_ddp.py
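# For context: torchrun with --standalone --nnodes=1 --nproc-per-node=4 spawns
# four local worker processes and sets LOCAL_RANK / RANK / WORLD_SIZE in each
# worker's environment. CUDA_LAUNCH_BLOCKING=1 makes CUDA kernel launches
# synchronous, so an asynchronous device-side error is reported at the line
# that actually caused it rather than at some later call.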
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchtext
import torch.distributed
import torch.utils.data
import torch.utils.data.distributed
from torchtext.datasets import IMDB
from torchtext.datasets.imdb import NUM_LINES
from torchtext.data import get_tokenizer
from torchtext.vocab import build_vocab_from_iterator
from torchtext.data.functional import to_map_style_dataset
import sys
import os
import logging
logging.basicConfig(
    level=logging.WARNING,
    stream=sys.stdout,
    format="%(asctime)s %(levelname)s: %(message)s",
)
VOCAB_SIZE = 15000
class GCNN(nn.Module):
    def __init__(self, vocab_size=VOCAB_SIZE, embedding_dim=64, num_class=2):
        super().__init__()
        self.embedding_table = nn.Embedding(vocab_size, embedding_dim)
        nn.init.xavier_uniform_(self.embedding_table.weight)
        self.conv_A_1 = nn.Conv1d(embedding_dim, 64, 15, stride=7)
        self.conv_B_1 = nn.Conv1d(embedding_dim, 64, 15, stride=7)
        self.conv_A_2 = nn.Conv1d(64, 64, 15, stride=7)
        self.conv_B_2 = nn.Conv1d(64, 64, 15, stride=7)
        self.output_linear1 = nn.Linear(64, 128)
        self.output_linear2 = nn.Linear(128, num_class)

    def forward(self, word_index):
        # GCNN forward pass: produce logits from the input word_index
        # 1. look up word embeddings; word_index shape: [bs, max_seq_len]
        word_embedding = self.embedding_table(word_index)  # [bs, max_seq_len, embedding_dim] FIXME
        # 2. gated conv layers (each strided conv shrinks the sequence dim)
        word_embedding = word_embedding.transpose(1, 2)  # [bs, embedding_dim, max_seq_len]
        A = self.conv_A_1(word_embedding)
        B = self.conv_B_1(word_embedding)
        H = A * torch.sigmoid(B)  # [bs, 64, reduced_seq_len]
        A = self.conv_A_2(H)
        B = self.conv_B_2(H)
        H = A * torch.sigmoid(B)  # [bs, 64, further_reduced_seq_len]
        # 3. pooling and linear head
        pool_output = torch.mean(H, dim=-1)  # avg pooling, get [bs, 64]
        linear1_output = self.output_linear1(pool_output)
        logits = self.output_linear2(linear1_output)  # [bs, 2]
        return logits
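# Quick CPU shape check for GCNN (a sketch; run separately, not under torchrun):
#   m = GCNN()
#   dummy = torch.randint(0, VOCAB_SIZE, (2, 256))  # [bs, max_seq_len]
#   print(m(dummy).shape)                           # -> torch.Size([2, 2])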
class TextClassificationModel(nn.Module):
    """Simple EmbeddingBag + DNN model."""

    def __init__(self, vocab_size=VOCAB_SIZE, embed_dim=64, num_class=2):
        super().__init__()
        self.embedding = nn.EmbeddingBag(vocab_size, embed_dim, sparse=False)
        self.fc = nn.Linear(embed_dim, num_class)

    def forward(self, token_index):
        embedded = self.embedding(token_index)  # shape: [bs, embedding_dim]
        return self.fc(embedded)
# step 2: IMDB DataLoader
BATCH_SIZE = 64

def yield_tokens(train_data_iter, tokenizer):
    for i, sample in enumerate(train_data_iter):
        label, comment = sample
        yield tokenizer(comment)
def collate_fn(batch):
    """Post-process a DataLoader mini-batch: tokenize, pad, and build targets."""
    target = []
    token_index = []
    max_length = 0
    for i, (label, comment) in enumerate(batch):
        tokens = tokenizer(comment)
        token_index.append(vocab(tokens))
        if len(tokens) > max_length:
            max_length = len(tokens)
        if label == "pos":
            target.append(0)
        else:
            target.append(1)
    # pad every sequence with 0 up to the longest sequence in the batch
    token_index = [index + [0] * (max_length - len(index)) for index in token_index]
    return (torch.tensor(target).to(torch.int64), torch.tensor(token_index).to(torch.int32))
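# Illustration of collate_fn on a hypothetical mini-batch:
#   collate_fn([("pos", "a great movie"), ("neg", "bad")])
#   -> (tensor([0, 1]), an int32 tensor of shape [2, 3], zero-padded
#      to the longest tokenized comment in the batch)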
# step 3: training loop
def train(local_rank, train_dataset, eval_dataset, model, optimizer, num_epoch, log_step_interval, save_step_interval, eval_step_interval, save_path, resume=""):
    """Train using the datasets wrapped as map-style datasets."""
    start_epoch = 0
    start_step = 0
    if resume != "":
        # load from a checkpoint
        logging.warning(f"loading from {resume}")
        checkpoint = torch.load(resume, map_location=torch.device("cuda:0"))  # cpu, cuda, cuda:index
        model.load_state_dict(checkpoint['model_state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
        start_epoch = checkpoint['epoch']
        start_step = checkpoint['step']

    # model = nn.parallel.DistributedDataParallel(model.cuda(local_rank), device_ids=[local_rank])
    model = model.cuda(local_rank)
    model = nn.parallel.DistributedDataParallel(model, device_ids=[local_rank])
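    # Note on the DDP wrapper: at construction it broadcasts rank 0's parameters
    # to all workers, and during backward it all-reduces gradients, so each
    # process must feed its shard of data to its own GPU (local_rank).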
    train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
    train_data_loader = torch.utils.data.DataLoader(train_dataset, batch_size=BATCH_SIZE, collate_fn=collate_fn, sampler=train_sampler)
    eval_data_loader = torch.utils.data.DataLoader(eval_dataset, batch_size=BATCH_SIZE, collate_fn=collate_fn)

    for epoch_index in range(start_epoch, num_epoch):
        ema_loss = 0.
        num_batches = len(train_data_loader)
        train_sampler.set_epoch(epoch_index)  # reshuffle each GPU's shard every epoch
        for batch_index, (target, token_index) in enumerate(train_data_loader):
            optimizer.zero_grad()
            step = num_batches * epoch_index + batch_index + 1
            # token_index = token_index.cuda(local_rank)
            target = target.cuda(local_rank)
            print(f"-----{token_index.shape}----")
            logits = model(token_index)  # FIXME
            logging.error("passed this point")
            bce_loss = F.binary_cross_entropy(torch.sigmoid(logits), F.one_hot(target, num_classes=2).to(torch.float32))
            ema_loss = 0.9 * ema_loss + 0.1 * bce_loss
            bce_loss.backward()
            nn.utils.clip_grad_norm_(model.parameters(), 0.1)
            optimizer.step()

            if step % log_step_interval == 0:
                logging.warning(f"epoch_index: {epoch_index}, batch_index: {batch_index}, ema_loss: {ema_loss.item()}")

            if step % save_step_interval == 0 and local_rank == 0:
                os.makedirs(save_path, exist_ok=True)
                save_file = os.path.join(save_path, f"step_{step}.pt")
                torch.save({
                    'epoch': epoch_index,
                    'step': step,
                    'model_state_dict': model.module.state_dict(),
                    'optimizer_state_dict': optimizer.state_dict(),
                    'loss': bce_loss,
                }, save_file)
                logging.warning(f"checkpoint has been saved in {save_file}")

            if step % eval_step_interval == 0:  # validation
                logging.warning("start to do evaluation...")
                model.eval()
                ema_eval_loss = 0
                total_acc_account = 0
                total_account = 0
                for eval_batch_index, (eval_target, eval_token_index) in enumerate(eval_data_loader):
                    total_account += eval_target.shape[0]
                    eval_logits = model(eval_token_index)
                    eval_target = eval_target.cuda(local_rank)
                    total_acc_account += (torch.argmax(eval_logits, dim=-1) == eval_target).sum().item()
                    eval_bce_loss = F.binary_cross_entropy(torch.sigmoid(eval_logits), F.one_hot(eval_target, num_classes=2).to(torch.float32))
                    ema_eval_loss = 0.9 * ema_eval_loss + 0.1 * eval_bce_loss
                acc = total_acc_account / total_account  # plain float, so no .item()
                logging.warning(f"eval_ema_loss: {ema_eval_loss.item()}, eval_acc: {acc}")
                model.train()
# step 4: entry point
if __name__ == "__main__":
    local_rank = int(os.environ['LOCAL_RANK'])
    local_rank = local_rank % torch.cuda.device_count()
    # local_rank = torch.distributed.get_rank()
    if torch.cuda.is_available():
        logging.warning("Cuda is available!")
        if torch.cuda.device_count() > 1:
            logging.warning(f"Found {torch.cuda.device_count()} GPUs!")
        else:
            logging.warning("Too few GPUs!")
            exit()
    else:
        logging.warning("Cuda is not available! Exit!")
        exit()

    torch.distributed.init_process_group("nccl")
    train_data_iter = IMDB(root='../data', split='train')
    tokenizer = get_tokenizer("basic_english")
    vocab = build_vocab_from_iterator(yield_tokens(train_data_iter, tokenizer), min_freq=20, specials=["<unk>"])
    vocab.set_default_index(0)
    print(f"Size of vocab: {len(vocab)}")

    model = GCNN()
    # model = TextClassificationModel()
    print("Model parameters #:", sum(p.numel() for p in model.parameters()))
    optimizer = torch.optim.Adam(model.parameters(), lr=0.001)

    # train_data_loader = torch.utils.data.DataLoader(to_map_style_dataset(train_data_iter), batch_size=BATCH_SIZE, collate_fn=collate_fn, shuffle=False)
    eval_data_iter = IMDB(root='../data', split='test')
    # eval_data_loader = torch.utils.data.DataLoader(to_map_style_dataset(eval_data_iter), batch_size=BATCH_SIZE, collate_fn=collate_fn)
    resume = ""
    train(local_rank, to_map_style_dataset(train_data_iter), to_map_style_dataset(eval_data_iter), model, optimizer, num_epoch=10, log_step_interval=20, save_step_interval=500, eval_step_interval=300, save_path="./logs_imdb_text_classification", resume=resume)
    torch.distributed.destroy_process_group()
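For reference, the minimal torchrun + DDP pattern the script follows looks like this (a self-contained sketch with a stand-in linear model; it runs with the same torchrun command):

import os
import torch
import torch.nn as nn
import torch.distributed as dist

dist.init_process_group("nccl")                # one process per GPU
local_rank = int(os.environ["LOCAL_RANK"])     # set by torchrun
torch.cuda.set_device(local_rank)
model = nn.Linear(8, 2).cuda(local_rank)
model = nn.parallel.DistributedDataParallel(model, device_ids=[local_rank])
x = torch.randn(4, 8).cuda(local_rank)         # inputs on the same device
print(model(x).shape)                          # torch.Size([4, 2]) on every rank
dist.destroy_process_group()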
The torchrun command I used is shown in the comment at the top of the file.
I checked the nn.Embedding: its num_embeddings is set well above what the vocabulary should require, so that shouldn't be the cause.
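To make that check concrete, this is the kind of verification I mean (a minimal sketch reusing the script's tokenizer and vocab; it re-iterates the training split once on CPU):

check_iter = IMDB(root='../data', split='train')
max_index = max((max(vocab(tokenizer(comment)), default=0) for _, comment in check_iter), default=0)
print(max_index, len(vocab))  # embedding lookups are safe only if max_index < VOCAB_SIZE (num_embeddings)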