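"""Gradio app that generates a PowerPoint presentation skeleton from free text
with a Hugging Face language model, then exports it as a .pptx file."""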
import os
import time

import gradio as gr
import torch
from huggingface_hub import hf_hub_download, login
from llama_cpp import Llama
from pptx import Presentation
from transformers import AutoModelForCausalLM, AutoTokenizer
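# UI display name -> Hugging Face model id; entries ending in .gguf are
# downloaded as GGUF files and run through llama-cpp-python.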
TEXT_MODELS = {
    "Utter-Project_EuroLLM-1.7B": "utter-project/EuroLLM-1.7B",
    "Mistral Nemo 2407 (GGUF)": "MisterAI/Bartowski_MistralAI_Mistral-Nemo-Instruct-2407-IQ4_XS.gguf",
    "Mixtral 8x7B": "mistralai/Mixtral-8x7B-v0.1",
    "Lucie 7B": "OpenLLM-France/Lucie-7B"
}
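# NOTE: the prompt is kept in French: parse_presentation_content() below
# matches the French section markers (TITRE:, DIAPO, Titre:) that this
# prompt instructs the model to emit.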
PREPROMPT = """Vous êtes un assistant IA expert en création de présentations PowerPoint professionnelles.
Générez une présentation structurée et détaillée au format Markdown en suivant ce format EXACT:

TITRE: [Titre principal de la présentation]

DIAPO 1:
Titre: [Titre de la diapo]
Points:
- Point 1
- Point 2
- Point 3

DIAPO 2:
Titre: [Titre de la diapo]
Points:
- Point 1
- Point 2
- Point 3

[Continuez avec ce format pour chaque diapositive]

Analysez le texte suivant et créez une présentation professionnelle :"""


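# Singleton: authenticate with Hugging Face once and cache loaded models and
# tokenizers so repeated requests do not reload them.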
class ModelManager:
    _instance = None

    def __new__(cls):
        if cls._instance is None:
            cls._instance = super(ModelManager, cls).__new__(cls)
            cls._instance.initialized = False
        return cls._instance

    def __init__(self):
        if not self.initialized:
            self.token = os.getenv('Authentification_HF')
            if not self.token:
                raise ValueError("Hugging Face authentication token not found")
            login(self.token)
            self.loaded_models = {}
            self.loaded_tokenizers = {}
            self.initialized = True

    def get_model(self, model_name):
        """Load a model on first use, or return it from the cache."""
        if model_name not in self.loaded_models:
            print(f"Loading model {model_name}...")
            model_id = TEXT_MODELS[model_name]

            if model_id.endswith('.gguf'):
                # repo_id is the first two path segments; the filename is the
                # last segment (here the repo name itself ends in .gguf).
                model_path = hf_hub_download(
                    repo_id='/'.join(model_id.split('/')[:2]),
                    filename=model_id.split('/')[-1],
                    token=self.token
                )
                self.loaded_models[model_name] = Llama(
                    model_path=model_path,
                    n_ctx=4096,
                    n_batch=512,
                    verbose=False
                )
                print(f"GGUF model {model_id} loaded successfully!")
            else:
                self.loaded_tokenizers[model_name] = AutoTokenizer.from_pretrained(model_id, token=self.token)
                self.loaded_models[model_name] = AutoModelForCausalLM.from_pretrained(
                    model_id,
                    torch_dtype=torch.bfloat16,
                    device_map="auto",
                    token=self.token
                )
                print(f"Transformers model {model_id} loaded successfully!")

        # GGUF models have no separate tokenizer, so .get() may return None.
        return self.loaded_models[model_name], self.loaded_tokenizers.get(model_name)


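# Turns raw model output into a python-pptx Presentation.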
class PresentationGenerator:
    def __init__(self):
        self.model_manager = ModelManager()

    def generate_text(self, prompt, model_name, temperature=0.7, max_tokens=4096):
        """Generate the presentation text."""
        model, tokenizer = self.model_manager.get_model(model_name)

        if isinstance(model, Llama):
            response = model(
                prompt,
                max_tokens=max_tokens,
                temperature=temperature,
                echo=False
            )
            return response['choices'][0]['text']
        else:
            inputs = tokenizer(
                prompt,
                return_tensors="pt",
                truncation=True,
                max_length=4096
            ).to(model.device)

            outputs = model.generate(
                **inputs,
                max_new_tokens=max_tokens,
                temperature=temperature,
                do_sample=True,
                pad_token_id=tokenizer.eos_token_id
            )
            # Decode only the newly generated tokens; otherwise the prompt
            # (including the format template) is echoed back and would be
            # parsed as slide content.
            new_tokens = outputs[0][inputs['input_ids'].shape[1]:]
            return tokenizer.decode(new_tokens, skip_special_tokens=True)
    def parse_presentation_content(self, content):
        """Parse the generated content into slide sections."""
        slides = []
        current_slide = None

        for line in content.split('\n'):
            line = line.strip()
            if line.startswith('TITRE:'):
                slides.append({'type': 'title', 'title': line[6:].strip()})
            elif line.startswith('DIAPO'):
                if current_slide:
                    slides.append(current_slide)
                current_slide = {'type': 'content', 'title': '', 'points': []}
            elif line.startswith('Titre:') and current_slide:
                current_slide['title'] = line[6:].strip()
            elif line.startswith('- ') and current_slide:
                current_slide['points'].append(line[2:].strip())

        if current_slide:
            slides.append(current_slide)

        return slides
    def create_presentation(self, slides):
        """Build the text-only PowerPoint presentation."""
        if not slides:
            raise ValueError("No slides could be parsed from the generated content")

        prs = Presentation()

        # Title slide
        title_slide = prs.slides.add_slide(prs.slide_layouts[0])
        title_slide.shapes.title.text = slides[0]['title']

        # Content slides
        for slide in slides[1:]:
            content_slide = prs.slides.add_slide(prs.slide_layouts[1])
            content_slide.shapes.title.text = slide['title']

            if slide['points']:
                body = content_slide.shapes.placeholders[1].text_frame
                body.clear()
                # text_frame.clear() leaves one empty paragraph; reuse it for
                # the first bullet instead of adding a new one.
                for i, point in enumerate(slide['points']):
                    p = body.paragraphs[0] if i == 0 else body.add_paragraph()
                    p.text = point
                    p.level = 0

        return prs


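# Gradio callbacks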
def generate_skeleton(text, text_model_name, temperature, max_tokens):
    """Generate the presentation skeleton."""
    try:
        start_time = time.time()
        generator = PresentationGenerator()

        full_prompt = PREPROMPT + "\n\n" + text
        generated_content = generator.generate_text(full_prompt, text_model_name, temperature, max_tokens)

        execution_time = time.time() - start_time
        status = f"Skeleton generated successfully in {execution_time:.2f} seconds!"

        # Reveal the "Create Presentation" button once content is available.
        return status, generated_content, gr.update(visible=True)

    except Exception as e:
        print(f"Error during generation: {str(e)}")
        return f"Error: {str(e)}", None, gr.update(visible=False)


def create_presentation_file(generated_content):
    """Create the PowerPoint file from the generated content."""
    try:
        generator = PresentationGenerator()

        slides = generator.parse_presentation_content(generated_content)
        prs = generator.create_presentation(slides)

        output_path = os.path.abspath("presentation.pptx")
        prs.save(output_path)

        if not os.path.exists(output_path):
            raise FileNotFoundError(f"File {output_path} was not created correctly")

        return output_path

    except Exception as e:
        print(f"Error while creating the file: {str(e)}")
        return None


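# Gradio UI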
with gr.Blocks(theme=gr.themes.Glass()) as demo:
    gr.Markdown(
        """
        # AI PowerPoint Presentation Generator

        Automatically create professional presentations with the help of AI.
        """
    )

    with gr.Row():
        with gr.Column(scale=1):
            text_model_choice = gr.Dropdown(
                choices=list(TEXT_MODELS.keys()),
                value=list(TEXT_MODELS.keys())[0],
                label="Text generation model"
            )
            temperature = gr.Slider(
                minimum=0.1,
                maximum=1.0,
                value=0.7,
                step=0.1,
                label="Temperature"
            )
            max_tokens = gr.Slider(
                minimum=1000,
                maximum=4096,
                value=2048,
                step=256,
                label="Maximum tokens"
            )

    with gr.Row():
        with gr.Column(scale=2):
            input_text = gr.Textbox(
                lines=10,
                label="Your text",
                placeholder="Describe the content you want for your presentation..."
            )

    with gr.Row():
        generate_skeleton_btn = gr.Button("Generate Presentation Skeleton", variant="primary")

    with gr.Row():
        with gr.Column():
            status_output = gr.Textbox(
                label="Status",
                lines=2
            )
            generated_content = gr.Textbox(
                label="Generated content",
                lines=10,
                show_copy_button=True
            )
            create_presentation_btn = gr.Button("Create Presentation", visible=False)
            output_file = gr.File(
                label="PowerPoint presentation"
            )

    generate_skeleton_btn.click(
        fn=generate_skeleton,
        inputs=[
            input_text,
            text_model_choice,
            temperature,
            max_tokens
        ],
        outputs=[
            status_output,
            generated_content,
            create_presentation_btn
        ]
    )

    create_presentation_btn.click(
        fn=create_presentation_file,
        inputs=[generated_content],
        outputs=[output_file]
    )


if __name__ == "__main__":
    demo.launch()