| import streamlit as st |
| import requests |
| from dotenv import load_dotenv |
| import os |
| import time |
| from helper.telemetry import collect_telemetry |
| from helper.upload_File import uploadFile |
| from helper.button_behaviour import hide_button |
| from helper.initialize_analyze_session import initialize_analyze_session |
| import json |
| from newsapi import NewsApiClient |
| import requests |
| from google import genai |
| from openai import OpenAI |
|
|
class TargetMarketAnalyst:
    """Streamlit page for a single market analyst.

    Sends the user's chat prompt to a Langflow model endpoint and renders the
    returned target-market analysis in a two-column layout.
    """

    def __init__(self, model_url, analyst_name, data_src, analyst_description):
        # Static configuration for this analyst page.
        self.model_url = model_url
        self.analyst_name = analyst_name
        self.data_src = data_src
        self.analyst_description = analyst_description
        # Render immediately on construction (Streamlit script style).
        self.initialize()
        self.row1()

    def initialize(self):
        """Load environment variables and render the page header."""
        load_dotenv()

        st.header(self.analyst_name)

        # External evaluation form, configured via the `Link` env var
        # (may be None if unset — rendered as-is).
        url = os.getenv('Link')
        st.write('Evaluation Form: [Link](%s)' % url)

    def request_model(self, payload_txt):
        """POST ``payload_txt`` to the model endpoint and render the result.

        Returns the raw decoded JSON response.
        Raises ``requests.exceptions.HTTPError`` on a non-2xx response.
        """
        response = requests.post(self.model_url, json=payload_txt)
        response.raise_for_status()
        output = response.json()

        # BUG FIX: removed a stray `self.newsapi.get_sources()` call —
        # `self.newsapi` was never initialized anywhere in this class, so the
        # call raised AttributeError; the duplicated raise_for_status()/json()
        # lines that followed it were dead leftovers of that removed path.

        # The Langflow response nests the model text deep in the payload; the
        # text itself is a JSON-encoded list whose first element carries the
        # analysis fields.
        text = output["outputs"][0]["outputs"][0]["results"]["text"]["data"]["text"]
        parsed = json.loads(text)[0]

        target_market = parsed["target_market"]
        demographics = parsed["demographics"]
        summary = parsed["summary"]

        with st.expander("News Available", expanded=True, icon="🤖"):
            st.write(f"**Target Market**:\n {target_market}\n")
            st.write(f"\n**Product / Service Demographics**: {demographics}")
            st.write(f"\n**Marketing Message Summary**: {summary}")

        return output

    def row1(self):
        """Render the chat input (left column) and the model reply (right column)."""
        col1, col2 = st.columns(gap="medium", spec=[0.33, 0.66])
        with col1:
            prompt = st.chat_input("How can I help you today?")

            if prompt:
                # Build the Langflow run payload only when there is a prompt;
                # the tweak IDs pin the specific components of the flow.
                payload_txt = {
                    "input_value": f"{prompt}",
                    "output_type": "chat",
                    "input_type": "chat",
                    "tweaks": {
                        "Agent-jDo0M": {},
                        "ChatInput-TcV5B": {},
                        "ChatOutput-woYKj": {},
                        "URL-iTqUH": {},
                        "CalculatorComponent-yPwgW": {},
                        "APIRequest-rDfwC": {},
                        "TextInput-sID7m": {},
                        "ParseData-ezi1L": {},
                    },
                }

                headers = {
                    'Content-Type': 'application/json',
                }

                # TODO(review): endpoint is hard-coded to an internal IP and a
                # specific flow ID; consider using self.model_url or an env var.
                url = "http://172.17.21.23:7860/api/v1/run/382ac239-3231-4f9b-89fe-f5ee26e4b1eb?stream=false"

                response = requests.post(url, json=payload_txt, headers=headers, params={'stream': 'false'})
                response.raise_for_status()
                output = response.json()
                text = output["outputs"][0]["outputs"][0]["results"]["message"]["text"]

                def remove_escape_sequences(text):
                    # The model reply contains literal backslash escapes;
                    # convert them to real whitespace characters for display.
                    return text.replace('\\n', '\n').replace('\\t', '\t').replace('\\r', '\r')

                self.text = remove_escape_sequences(json.dumps(text, ensure_ascii=False).strip('"'))

        with col2:
            st.write("Content: ")

            if prompt:
                st.write(self.text)
if __name__ == "__main__":
    # Script entry point: widen the Streamlit layout, then show the
    # file-upload widget from the helper module.
    st.set_page_config(layout="wide")
    uploaded_file = uploadFile()
|