linkstatic1 committed on
Commit
3f14974
·
verified ·
1 Parent(s): 4cd9408

Update src/streamlit_app.py

Browse files
Files changed (1) hide show
  1. src/streamlit_app.py +29 -16
src/streamlit_app.py CHANGED
@@ -6,6 +6,18 @@ import datetime
6
  # --- PAGE CONFIG ---
7
  st.set_page_config(page_title="SaaS Media Vault", layout="wide")
8
 
 
 
 
 
 
 
 
 
 
 
 
 
9
  st.title("🗄️ SaaS Media Intelligence Vault")
10
  st.markdown("Analyze, store, and **search** your media assets using AI-generated metadata.")
11
 
@@ -13,18 +25,18 @@ st.markdown("Analyze, store, and **search** your media assets using AI-generated
13
  if "media_library" not in st.session_state:
14
  st.session_state.media_library = []
15
 
16
- # --- MODEL LOADING (Option 1: Auto-Detect Task) ---
17
  @st.cache_resource
18
  def load_models():
19
- # CLIP for matching labels
20
  classifier = pipeline("zero-shot-image-classification",
21
  model="openai/clip-vit-base-patch32",
22
  device=-1)
23
 
24
- # Use the specific task name from your error message
25
  captioner = pipeline("image-text-to-text",
26
- model="Salesforce/blip-image-captioning-base",
27
- device=-1)
28
 
29
  return classifier, captioner
30
 
@@ -32,44 +44,44 @@ classifier, captioner = load_models()
32
 
33
  # --- SIDEBAR: UPLOAD & SETTINGS ---
34
  st.sidebar.header("📥 Asset Management")
35
- labels_input = st.sidebar.text_input("Analysis Keywords", "professional, tech, lifestyle, nature")
36
  uploaded_file = st.sidebar.file_uploader("Add New Image", type=["jpg", "png", "jpeg"])
37
 
38
  if uploaded_file:
39
  if st.sidebar.button("Process & Index Asset"):
40
  image = Image.open(uploaded_file)
41
 
42
- with st.spinner("AI is thinking..."):
43
  # 1. BLIP Description
44
  prompt = "a photo of"
45
  caption_out = captioner(image, text=prompt, max_new_tokens=30)
46
- description = caption_out[0]['generated_text']
47
 
48
- # 2. CLIP Top Label
49
- labels = [l.strip() for l in labels_input.split(",")]
50
- clip_out = classifier(image, candidate_labels=labels)
 
51
  top_label = clip_out[0]['label']
52
  top_score = clip_out[0]['score']
53
 
54
  # 3. SAVE TO ARRAY
55
  asset_data = {
56
- "id": len(st.session_state.media_library),
57
  "timestamp": datetime.datetime.now().strftime("%H:%M:%S"),
58
  "image": image,
59
  "description": description.lower(),
60
  "tag": top_label.lower(),
61
  "confidence": top_score
62
  }
 
63
  st.session_state.media_library.insert(0, asset_data)
64
  st.sidebar.success("Asset Cataloged!")
65
 
66
  # --- MAIN SECTION: SEARCH & RETRIEVAL ---
67
  st.subheader("🔍 Intelligent Retrieval")
68
- search_query = st.text_input("Search the vault by keyword (e.g., 'dog', 'tech', 'landscape')", "").lower()
69
 
70
  # --- FILTER LOGIC ---
71
  if search_query:
72
- # We check if the search term is in the AI description or the chosen Tag
73
  filtered_items = [
74
  item for item in st.session_state.media_library
75
  if search_query in item["description"] or search_query in item["tag"]
@@ -78,11 +90,12 @@ else:
78
  filtered_items = st.session_state.media_library
79
 
80
  # --- DISPLAY VAULT ---
81
- st.write(f"Showing {len(filtered_items)} of {len(st.session_state.media_library)} assets")
82
 
83
  if not filtered_items:
84
  st.info("No matching assets found in the vault.")
85
  else:
 
86
  for item in filtered_items:
87
  with st.container():
88
  col1, col2 = st.columns([1, 3])
@@ -91,7 +104,7 @@ else:
91
  with col2:
92
  st.write(f"**🕒 Logged:** {item['timestamp']}")
93
  st.info(f"**AI Description:** {item['description'].capitalize()}")
94
- st.write(f"**🏷️ Primary Tag:** `{item['tag']}` ({round(item['confidence']*100, 1)}%)")
95
  st.divider()
96
 
97
  # --- CLEAR UTILITY ---
 
6
  # --- PAGE CONFIG ---
7
  st.set_page_config(page_title="SaaS Media Vault", layout="wide")
8
 
9
+ # Custom CSS to prevent layout shifting and "shaking"
10
+ st.markdown("""
11
+ <style>
12
+ .stColumn {
13
+ transition: none !important;
14
+ }
15
+ div[data-testid="stVerticalBlock"] > div {
16
+ animation: none !important;
17
+ }
18
+ </style>
19
+ """, unsafe_allow_html=True)
20
+
21
  st.title("🗄️ SaaS Media Intelligence Vault")
22
  st.markdown("Analyze, store, and **search** your media assets using AI-generated metadata.")
23
 
 
25
  if "media_library" not in st.session_state:
26
  st.session_state.media_library = []
27
 
28
+ # --- MODEL LOADING ---
29
  @st.cache_resource
30
  def load_models():
31
+ # CLIP for general categorization
32
  classifier = pipeline("zero-shot-image-classification",
33
  model="openai/clip-vit-base-patch32",
34
  device=-1)
35
 
36
+ # BLIP for natural language description
37
  captioner = pipeline("image-text-to-text",
38
+ model="Salesforce/blip-image-captioning-base",
39
+ device=-1)
40
 
41
  return classifier, captioner
42
 
 
44
 
45
  # --- SIDEBAR: UPLOAD & SETTINGS ---
46
  st.sidebar.header("📥 Asset Management")
 
47
  uploaded_file = st.sidebar.file_uploader("Add New Image", type=["jpg", "png", "jpeg"])
48
 
49
  if uploaded_file:
50
  if st.sidebar.button("Process & Index Asset"):
51
  image = Image.open(uploaded_file)
52
 
53
+ with st.spinner("AI is indexing..."):
54
  # 1. BLIP Description
55
  prompt = "a photo of"
56
  caption_out = captioner(image, text=prompt, max_new_tokens=30)
57
+ description = caption_out[0]['generated_text'].replace("a photo of ", "")
58
 
59
+ # 2. Internal Auto-Tagging (Replaces the manual keywords input)
60
+ # We use a broad set of categories to give the AI context without user input
61
+ auto_labels = ["object", "person", "place", "nature", "technology", "document"]
62
+ clip_out = classifier(image, candidate_labels=auto_labels)
63
  top_label = clip_out[0]['label']
64
  top_score = clip_out[0]['score']
65
 
66
  # 3. SAVE TO ARRAY
67
  asset_data = {
68
+ "id": f"{datetime.datetime.now().timestamp()}", # Unique ID to prevent UI jitter
69
  "timestamp": datetime.datetime.now().strftime("%H:%M:%S"),
70
  "image": image,
71
  "description": description.lower(),
72
  "tag": top_label.lower(),
73
  "confidence": top_score
74
  }
75
+ # Insert at the top
76
  st.session_state.media_library.insert(0, asset_data)
77
  st.sidebar.success("Asset Cataloged!")
78
 
79
  # --- MAIN SECTION: SEARCH & RETRIEVAL ---
80
  st.subheader("🔍 Intelligent Retrieval")
81
+ search_query = st.text_input("Search the vault (e.g., 'flowers', 'tech', 'laptop')", "").lower()
82
 
83
  # --- FILTER LOGIC ---
84
  if search_query:
 
85
  filtered_items = [
86
  item for item in st.session_state.media_library
87
  if search_query in item["description"] or search_query in item["tag"]
 
90
  filtered_items = st.session_state.media_library
91
 
92
  # --- DISPLAY VAULT ---
93
+ st.write(f"Showing **{len(filtered_items)}** assets")
94
 
95
  if not filtered_items:
96
  st.info("No matching assets found in the vault.")
97
  else:
98
+ # Using a container with a fixed key helps Streamlit manage the DOM state better
99
  for item in filtered_items:
100
  with st.container():
101
  col1, col2 = st.columns([1, 3])
 
104
  with col2:
105
  st.write(f"**🕒 Logged:** {item['timestamp']}")
106
  st.info(f"**AI Description:** {item['description'].capitalize()}")
107
+ st.write(f"**🏷️ Type:** `{item['tag']}` ({round(item['confidence']*100, 1)}%)")
108
  st.divider()
109
 
110
  # --- CLEAR UTILITY ---