import torch
import gradio as gr
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

# Load the fine-tuned Arabic question-generation model and its tokenizer
# from the Hugging Face Hub.
model = AutoModelForSeq2SeqLM.from_pretrained("ADELIB/ANQG")
tokenizer = AutoTokenizer.from_pretrained("ADELIB/ANQG")

# Run on GPU when available, otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print("device:", device)
model = model.to(device)

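# Optional (an assumption, not part of the original script): on a CUDA device
# the model could be cast to half precision to roughly halve GPU memory use.
# if device.type == "cuda":
#     model = model.half()
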
def generate_questions(context, answer):
    """Generate an Arabic question for the given context/answer pair."""
    # Build the T5-style prompt the model was fine-tuned on; the </s> end token
    # is appended explicitly.
    text = "context: " + context + " answer: " + answer + " </s>"
    encoding = tokenizer(
        text,
        max_length=512,
        padding=True,
        truncation=True,
        return_tensors="pt",
    )

    model.eval()
    generated_ids = model.generate(
        input_ids=encoding["input_ids"].to(device),
        attention_mask=encoding["attention_mask"].to(device),
        max_length=72,
        num_beams=5,
        num_return_sequences=1,
        early_stopping=True,
    )

    # Decode the beam output and strip the "question: " prefix the model emits.
    preds = [
        tokenizer.decode(gen_id, skip_special_tokens=True, clean_up_tokenization_spaces=True)
        for gen_id in generated_ids
    ]
    question = " ".join(preds)
    return question.replace("question: ", " ").strip()

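# A minimal sanity check; the Arabic context/answer below are illustrative
# placeholders, not from the original script. Uncomment to try the function
# directly before launching the UI:
# print(generate_questions("ولد ألبرت أينشتاين في ألمانيا عام 1879.", "1879"))
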
# Simple Gradio UI: two text inputs (context, answer) and one text output.
demo = gr.Interface(
    fn=generate_questions,
    inputs=[gr.Textbox(label="Context"), gr.Textbox(label="Answer")],
    outputs=gr.Textbox(label="Question"),
    title="Arabic Question Generation",
    description="Generate a question from a given context and answer.",
)
demo.launch()
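# Note: demo.launch(share=True) would additionally create a temporary public
# link; the plain launch above serves the app locally or on the hosting Space.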