import torch
import numpy as np
from transformers import GPTNeoForCausalLM, GPT2Tokenizer
import coremltools as ct
tokenizer = GPT2Tokenizer.from_pretrained("gpt2")

sentence_fragment = "The Oceans are"

class NEO(torch.nn.Module):
    def __init__(self, model):
        super(NEO, self).__init__()
        self.next_token_predictor = model

    def forward(self, x):
        sentence = x
        # With torchscript=True the traced model returns a tuple; the first
        # element holds the logits for every position in the sequence.
        predictions, _ = self.next_token_predictor(sentence)
        # Greedy decoding: always pick the single most likely next token.
        token = torch.argmax(predictions[-1, :], dim=0, keepdim=True)
        sentence = torch.cat((sentence, token), 0)
        return sentence

token_predictor = GPTNeoForCausalLM.from_pretrained("EleutherAI/gpt-neo-125M", torchscript=True).eval()

context = torch.tensor(tokenizer.encode(sentence_fragment))
# Trace the model with a dummy sequence of random token ids.
random_tokens = torch.randint(10000, (5,))
traced_token_predictor = torch.jit.trace(token_predictor, random_tokens)

model = NEO(model=traced_token_predictor)
scripted_model = torch.jit.script(model)

# Custom model

sentence_fragment = "The Oceans are"

for i in range(10):
    # Re-encode the growing sentence and append one predicted token.
    context = torch.tensor(tokenizer.encode(sentence_fragment))
    torch_out = scripted_model(context)
    sentence_fragment = tokenizer.decode(torch_out)
print("Custom model: {}".format(sentence_fragment))

# Stock model

model = GPTNeoForCausalLM.from_pretrained("EleutherAI/gpt-neo-125M", torchscript=True).eval()

sentence_fragment = "The Oceans are"

input_ids = tokenizer(sentence_fragment, return_tensors="pt").input_ids
gen_tokens = model.generate(input_ids, do_sample=True, max_length=20)
gen_text = tokenizer.batch_decode(gen_tokens)[0]
print("Stock model: "+gen_text)

RUN 1

Output:

Custom model: The Oceans are the most important source of water for the entire world
Stock model: The Oceans are on the rise. The American Southwest is thriving, but the southern United States still

RUN 2

Output:

Custom model: The Oceans are the most important source of water for the entire world. 
Stock model: The Oceans are the land of man

This is a short video of the Australian government

The custom model always returns the same output. However, with do_sample=True, the stock model.generate returns different results on each call. I spent a lot of time trying to figure out how do_sample works in transformers, so I'd appreciate your help.
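
As far as I understand it, my forward always takes the argmax (greedy decoding), while do_sample draws the next token from the softmax distribution. A toy sketch of the difference as I understand it (standalone example with made-up logits, not my traced model):

logits = torch.tensor([2.0, 1.0, 0.5])  # toy next-token logits

# Greedy decoding: deterministic, same token on every call
greedy_token = torch.argmax(logits)

# Sampling: draw from the softmax distribution, so repeated calls differ
probs = torch.softmax(logits, dim=0)
sampled_token = torch.multinomial(probs, num_samples=1)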

How do I code the custom model so that it returns different results on each call?

Thanks!

1 Answer


So, the answer would be to implement sampling :D

class NEO(torch.nn.Module):
    def __init__(self, model):
        super(NEO, self).__init__()
        self.next_token_predictor = model

    def forward(self, x):
        sentence = x
        predictions, _ = self.next_token_predictor(sentence)
        # Get the indices of the top-k (k=2) highest-probability tokens.
        # Two indices are enough: over N generated tokens you already get
        # up to 2^N different continuations.
        _, topK = torch.topk(predictions[-1, :], 2, dim=0)
        # Pick one of those two indices at random and append it to the sentence.
        perm = torch.randperm(topK.size(0))
        idx = perm[:1]
        token = topK[idx.long()]
        sentence = torch.cat((sentence, token), 0)
        return sentence
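
If you want behavior closer to generate(do_sample=True), sample from the full softmax distribution instead of just the top two tokens. A minimal sketch of that variant (my own adaptation, with a made-up NEOSampling name and a temperature knob, not the exact transformers implementation):

class NEOSampling(torch.nn.Module):
    def __init__(self, model, temperature=1.0):
        super(NEOSampling, self).__init__()
        self.next_token_predictor = model
        self.temperature = temperature

    def forward(self, x):
        sentence = x
        predictions, _ = self.next_token_predictor(sentence)
        # Temperature-scaled softmax over the last position's logits;
        # temperature < 1.0 sharpens the distribution, > 1.0 flattens it.
        probs = torch.softmax(predictions[-1, :] / self.temperature, dim=0)
        # Draw the next token from the full distribution instead of
        # taking the argmax or one of the top two.
        token = torch.multinomial(probs, num_samples=1)
        sentence = torch.cat((sentence, token), 0)
        return sentence

Note that the real sampler in transformers can additionally apply top-k/top-p filtering before drawing, so outputs will not match generate exactly.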