-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathapp.py
More file actions
217 lines (178 loc) · 6.88 KB
/
app.py
File metadata and controls
217 lines (178 loc) · 6.88 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
import streamlit as st
from google import genai
from google.genai import types
import base64
# 1. Page configuration — must be the first Streamlit call in the script.
st.set_page_config(
    page_title="Gemini Super Chat",
    page_icon="🍌",
    layout="wide",
)

st.title("✨ GeminiAPI v4.1 全能助手 (Pro & Nano)中转站模式")

# --- Pre-fill the API key from the URL query string (?key=...) ---
url_key = st.query_params.get("key", "")
# 2. Sidebar: configuration and file upload
with st.sidebar:
    st.header("⚙️ 配置")

    # API key input; pre-filled from the ?key= URL parameter when present.
    api_key = st.text_input(
        "API Key (中转站 Key)",
        value=url_key,
        type="password",
        help="你可以通过 URL ?key=你的API密钥 来自动填充"
    )

    st.divider()

    # Model selection: display label -> model id sent to the API.
    model_map = {
        "Gemini 3.1 Pro": "gemini-3.1-pro-preview",
        "Gemini 3 Pro": "gemini-3-pro-preview",
        "Gemini-3.1 flash lp": "gemini-3.1-flash-lite-preview",
    }
    selected_label = st.selectbox(
        "选择模型",
        list(model_map.keys()),
        index=0
    )
    model_id = model_map[selected_label]

    # Image-generation hint for "Banana" (Nano Banana) models.
    # FIX: the original check was case-sensitive (`"Banana" in selected_label`)
    # and no current label contains "Banana", so the branch was unreachable.
    # A case-insensitive test is backward compatible and survives label renames.
    # NOTE(review): no entry in model_map currently matches — confirm which
    # models are image-capable if/when such models are (re)added.
    if "banana" in selected_label.lower():
        st.caption("ℹ️ 当前模型支持图像生成与编辑")

    st.divider()

    # File upload widget; the selected file is attached to the next prompt.
    st.header("📤 上传文件/图片")
    uploaded_file = st.file_uploader(
        "支持图片、PDF、文本等",
        type=['png', 'jpg', 'jpeg', 'webp', 'pdf', 'txt', 'csv'],
        key="file_uploader"
    )

    # Clearing history triggers an immediate rerun so the UI empties at once.
    if st.button("🗑️ 清空对话历史"):
        st.session_state.messages = []
        st.rerun()
# 3. Session-state initialisation: chat history survives Streamlit reruns.
if "messages" not in st.session_state:
    st.session_state.messages = []

# 4. Gate: stop the script early until an API key has been provided.
if not api_key:
    st.info("👈 请在左侧输入 API Key 开始对话。")
    st.stop()

# ==========================================
# 5. Client initialisation (routed through a relay endpoint)
# ==========================================
try:
    client = genai.Client(
        api_key=api_key,
        http_options={
            # Point the SDK at the relay instead of Google's default host.
            "base_url": "https://jeniya.cn",
            # The relay expects the key again as a Bearer token.
            "headers": {"Authorization": f"Bearer {api_key}"},
        },
    )
except Exception as e:
    st.error(f"客户端初始化失败: {e}")
    st.stop()
# --- 辅助函数:显示内容 (增强版) ---
# --- Helper: render a stored attachment in the chat UI ---
def display_content(content_data, mime_type):
    """Render raw attachment bytes according to their MIME type.

    Images are shown inline (400px wide); PDFs and other types get a short
    textual caption. Does nothing when *content_data* is empty or None.
    """
    if not content_data:
        return
    if mime_type and mime_type.startswith("image/"):
        st.image(content_data, width=400)
        return
    if mime_type == "application/pdf":
        st.caption("📄 [PDF 文件]")
        return
    st.caption(f"📎 [文件: {mime_type}]")
# 6. Replay the stored chat history on every rerun.
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        # Uploaded attachment (only user turns store file_data/mime_type).
        if message.get("file_data"):
            display_content(message["file_data"], message.get("mime_type"))
        # Plain text content.
        if message.get("content"):
            st.markdown(message["content"])
        # Model-generated images (only assistant turns store these).
        for img_data, _img_mime in message.get("generated_images") or []:
            st.image(img_data, caption="Generated Image", width=400)
# 7. Handle a new user message.
if prompt := st.chat_input("输入你的问题... (例如: 画一只在太空冲浪的猫)"):
    # Record of this turn, stored in session state for replay on rerun.
    current_msg = {
        "role": "user",
        "content": prompt,
        "file_data": None,
        "mime_type": None,
    }

    # Build the API parts: the text prompt plus an optional attachment.
    user_parts = [types.Part.from_text(text=prompt)]
    if uploaded_file:
        bytes_data = uploaded_file.getvalue()
        mime_type = uploaded_file.type
        # Keep a copy in session state so the attachment can be re-rendered.
        current_msg["file_data"] = bytes_data
        current_msg["mime_type"] = mime_type
        user_parts.append(types.Part.from_bytes(data=bytes_data, mime_type=mime_type))

    # Echo the user message in the UI.
    with st.chat_message("user"):
        if current_msg["file_data"]:
            display_content(current_msg["file_data"], current_msg["mime_type"])
        st.markdown(prompt)

    # Persist the user turn before generating the reply.
    st.session_state.messages.append(current_msg)

    # Generate the assistant reply.
    with st.chat_message("assistant"):
        message_placeholder = st.empty()
        full_response_text = ""
        generated_images = []

        try:
            # --- Rebuild the conversation history for the API ---
            # ([:-1] excludes the user message appended just above; it is
            # sent separately via send_message_stream).
            history_contents = []
            for msg in st.session_state.messages[:-1]:
                role = "user" if msg["role"] == "user" else "model"
                parts = []
                if msg.get("content"):
                    parts.append(types.Part.from_text(text=msg["content"]))
                if msg.get("file_data"):
                    parts.append(types.Part.from_bytes(
                        data=msg["file_data"],
                        mime_type=msg["mime_type"],
                    ))
                if parts:
                    history_contents.append(types.Content(role=role, parts=parts))

            # --- Create the chat session ---
            chat = client.chats.create(
                model=model_id,
                history=history_contents,
                config=types.GenerateContentConfig(
                    temperature=0.7,
                ),
            )

            # --- Send the request and stream the reply ---
            response = chat.send_message_stream(user_parts)
            for chunk in response:
                # Text delta: append and show a typing cursor.
                if chunk.text:
                    full_response_text += chunk.text
                    message_placeholder.markdown(full_response_text + "▌")

                # Non-text parts (inline images returned by the model).
                # FIX: candidates / content / parts can be None on some
                # chunks (e.g. finish/usage-only chunks); the original
                # iterated them unguarded and could raise AttributeError
                # mid-stream.
                for candidate in chunk.candidates or []:
                    content = candidate.content
                    if content is None or not content.parts:
                        continue
                    for part in content.parts:
                        if part.inline_data:
                            img_bytes = part.inline_data.data
                            img_mime = part.inline_data.mime_type
                            generated_images.append((img_bytes, img_mime))
                            st.image(img_bytes, caption="✨ 生成预览", width=400)

            # Final render without the typing cursor.
            message_placeholder.markdown(full_response_text)

        except Exception as e:
            # Surface the failure in the UI and store it as the reply text.
            st.error(f"API 请求错误: {e}")
            full_response_text = f"错误: {str(e)}"

    # Persist the assistant turn (text + any generated images).
    st.session_state.messages.append({
        "role": "assistant",
        "content": full_response_text,
        "generated_images": generated_images,
    })