import streamlit as st
from transformers import AutoModelForCausalLM, AutoTokenizer


@st.cache_resource
def load_model_and_tokenizer():
    # Cache the model and tokenizer across Streamlit reruns so the 70B
    # checkpoint is only loaded once per process.
    model_name_or_path = "m42-health/med42-70b"
    model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto")
    tokenizer = AutoTokenizer.from_pretrained(model_name_or_path)
    return model, tokenizer


def generate_response(prompt, model, tokenizer):
    # Med42 expects the <|system|>/<|prompter|>/<|assistant|> prompt format.
    prompt_template = f'''
<|system|>: You are a helpful medical assistant created by M42 Health in the UAE.
<|prompter|>:{prompt}
<|assistant|>:
'''
    # Move inputs to the model's device rather than assuming CUDA is available.
    input_ids = tokenizer(prompt_template, return_tensors='pt').input_ids.to(model.device)
    output = model.generate(
        inputs=input_ids,
        temperature=0.7,
        do_sample=True,
        eos_token_id=tokenizer.eos_token_id,
        # Llama-style tokenizers often lack a pad token; fall back to EOS.
        pad_token_id=tokenizer.pad_token_id or tokenizer.eos_token_id,
        max_new_tokens=512,
    )
    # Decode only the newly generated tokens so the prompt is not echoed back.
    response = tokenizer.decode(output[0][input_ids.shape[-1]:], skip_special_tokens=True)
    return response


def main():
    st.title("M42 Health Medical Assistant")
    model, tokenizer = load_model_and_tokenizer()

    prompt = st.text_area("Enter your medical query:")
    if st.button("Submit"):
        with st.spinner("Generating response..."):
            # Pass the cached model and tokenizer explicitly; they are locals
            # of main(), not globals visible to generate_response().
            response = generate_response(prompt, model, tokenizer)
        st.write(response)


if __name__ == "__main__":
    main()
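# Minimal usage sketch, assuming this script is saved as app.py (the filename
# is illustrative) and that streamlit, transformers, torch, and accelerate
# (required for device_map="auto") are installed:
#
#   streamlit run app.py
#
# Note that med42-70b is a 70B-parameter model, so loading it requires very
# substantial GPU memory; device_map="auto" shards it across whatever devices
# accelerate can find.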