Python 打造 AI 三剑客:文档总结、代码生成与智能检索
介绍如何使用 Python 构建三个 AI 工具:文档总结器、代码生成器和智能资料助手。通过封装统一的 LLM 客户端,结合异步处理与分块策略,实现 PDF/网页解析总结、代码自动生成与测试、多源搜索整合。提供完整的 CLI 工具实现及云端部署方案,旨在提升开发效率与信息获取速度。

介绍如何使用 Python 构建三个 AI 工具:文档总结器、代码生成器和智能资料助手。通过封装统一的 LLM 客户端,结合异步处理与分块策略,实现 PDF/网页解析总结、代码自动生成与测试、多源搜索整合。提供完整的 CLI 工具实现及云端部署方案,旨在提升开发效率与信息获取速度。

你是否遇到过这些场景?
如果告诉你,用不到 200 行 Python 代码,就能打造一个 AI 助手,帮你解决这些问题,你信吗?
今天,我将带你从零开始,用 Python 打造三个 AI 工具:
| 技术组件 | 推荐方案 | 成本 | 说明 |
|---|---|---|---|
| LLM 模型 | DeepSeek / Qwen | 免费/低价 | 国内模型,中文优秀 |
| API 平台 | 硅基流动 / 魔搭社区 | ¥0.001/1k tokens | 新用户有免费额度 |
| 文档解析 | PyPDF2 / Unstructured | 免费 | 支持 PDF/Word/Markdown |
| 代码运行 | Subprocess / Docker | 免费 | 本地沙箱执行 |
| 搜索引擎 | Bing Search API | 付费(有免费层) | 或用 DuckDuckGo 免费版 |
# 创建虚拟环境
python -m venv ai-tools-env
source ai-tools-env/bin/activate # Windows 用:ai-tools-env\Scripts\activate
# 安装依赖
pip install openai pypdf2 requests beautifulsoup4 python-dotenv
pip install aiohttp httpx # 异步请求支持
创建 .env 文件:
# API 配置
DEEPSEEK_API_KEY=your_deepseek_api_key
DEEPSEEK_BASE_URL=https://api.deepseek.com/v1
# 或使用硅基流动(支持多个模型)
SILICONFLOW_API_KEY=your_siliconflow_key
SILICONFLOW_BASE_URL=https://api.siliconflow.cn/v1
# 搜索 API(可选)
BING_SEARCH_API_KEY=your_bing_key
在开始之前,我们先封装一个统一的 LLM 调用类:
import os
import asyncio
from typing import List, Dict, Optional, AsyncGenerator
from dataclasses import dataclass
from openai import AsyncOpenAI
from dotenv import load_dotenv
load_dotenv()
@dataclass
class Message:
    """A single chat message exchanged with the LLM."""
    role: str  # one of: "system" / "user" / "assistant"
    content: str  # message text
class LLMClient:
    """Unified async client for OpenAI-compatible chat-completion APIs.

    Falls back to DEEPSEEK_API_KEY / DEEPSEEK_BASE_URL environment
    variables when credentials are not passed explicitly.
    """

    def __init__(self, api_key: str = None, base_url: str = None,
                 model: str = "deepseek-chat", temperature: float = 0.7):
        self.api_key = api_key or os.getenv("DEEPSEEK_API_KEY")
        self.base_url = base_url or os.getenv("DEEPSEEK_BASE_URL")
        self.model = model
        self.temperature = temperature
        self.client = AsyncOpenAI(api_key=self.api_key, base_url=self.base_url)

    async def chat(self, messages: List[Message], stream: bool = False, **kwargs) -> str:
        """Send a chat request and return the full assistant reply.

        When stream=True, tokens are printed to stdout as they arrive and
        the concatenated text is returned at the end.
        """
        response = await self.client.chat.completions.create(
            model=self.model,
            messages=[{"role": m.role, "content": m.content} for m in messages],
            temperature=kwargs.get("temperature", self.temperature),
            stream=stream,
            # NOTE(review): original default was lost in extraction — confirm value.
            max_tokens=kwargs.get("max_tokens", 2000),
        )
        if stream:
            full_content = ""
            async for chunk in response:
                if chunk.choices[0].delta.content:
                    content = chunk.choices[0].delta.content
                    full_content += content
                    print(content, end="", flush=True)
            return full_content
        return response.choices[0].message.content

    async def chat_with_tools(self, messages: List[Message], functions: List[Dict]):
        """Chat with function/tool calling enabled; returns the raw message
        object so callers can inspect tool_calls.

        NOTE(review): original method name was lost in extraction.
        """
        response = await self.client.chat.completions.create(
            model=self.model,
            messages=[{"role": m.role, "content": m.content} for m in messages],
            tools=functions,
            tool_choice="auto",
        )
        return response.choices[0].message
async def test_llm():
    """Smoke-test: send one prompt and print the reply."""
    llm = LLMClient()
    # NOTE(review): original prompt text was lost in extraction.
    response = await llm.chat([Message(role="user", content="你好,请简单介绍一下你自己")])
    print(response)


if __name__ == "__main__":
    asyncio.run(test_llm())
import asyncio
from typing import List, Optional
from pathlib import Path
import PyPDF2
from bs4 import BeautifulSoup
import aiohttp
from dataclasses import dataclass
from datetime import datetime
@dataclass
class DocumentSummary:
    """Result of summarizing one document."""
    title: str
    summary: str
    key_points: List[str]
    reading_time: int  # estimated reading time in minutes
    word_count: int
    created_at: str  # human-readable timestamp
class DocumentParser:
    """Parsers that turn PDFs, text files and web pages into plain text."""

    @staticmethod
    async def parse_pdf(file_path: str) -> str:
        """Extract text from every page of a PDF file."""
        text = ""
        with open(file_path, 'rb') as file:
            pdf_reader = PyPDF2.PdfReader(file)
            for page in pdf_reader.pages:
                text += page.extract_text() + "\n"
        return text

    @staticmethod
    async def parse_text(file_path: str) -> str:
        """Read a plain-text / markdown file (assumed UTF-8)."""
        with open(file_path, 'r', encoding='utf-8') as f:
            return f.read()

    @staticmethod
    async def parse_url(url: str) -> str:
        """Download a web page and return its visible text content."""
        async with aiohttp.ClientSession() as session:
            async with session.get(url) as response:
                html = await response.text()
        soup = BeautifulSoup(html, 'html.parser')
        # Remove non-content tags before extracting text.
        for script in soup(['script', 'style']):
            script.decompose()
        return soup.get_text(separator='\n', strip=True)
class TextChunker:
    """Split long text into chunks that fit inside an LLM context window."""

    def __init__(self, chunk_size: int = 2000, overlap: int = 200):
        self.chunk_size = chunk_size
        # Characters of shared context between consecutive hard-split pieces.
        self.overlap = overlap

    def chunk(self, text: str) -> List[str]:
        """Pack paragraphs into chunks of at most chunk_size characters.

        Paragraphs longer than chunk_size are hard-split with `overlap`
        characters of carry-over. Empty chunks are never emitted.
        """
        paragraphs = text.split("\n\n")
        chunks: List[str] = []
        current_chunk = ""
        for para in paragraphs:
            if len(current_chunk) + len(para) <= self.chunk_size:
                current_chunk += para + "\n\n"
            else:
                if current_chunk.strip():
                    chunks.append(current_chunk.strip())
                if len(para) > self.chunk_size:
                    # Oversized paragraph: slide a window across it.
                    step = self.chunk_size - self.overlap
                    for i in range(0, len(para), step):
                        chunks.append(para[i:i + self.chunk_size])
                    current_chunk = ""
                else:
                    current_chunk = para + "\n\n"
        if current_chunk.strip():
            chunks.append(current_chunk.strip())
        return chunks
class DocumentSummarizer:
    """Map-reduce document summarizer: parse -> chunk -> summarize -> merge.

    NOTE(review): the original prompt strings and several constants were lost
    in extraction; the ones below are reconstructions — confirm wording.
    """

    def __init__(self, llm_client: LLMClient):
        self.llm = llm_client
        self.parser = DocumentParser()
        self.chunker = TextChunker()

    async def summarize(self, source: str, source_type: str = "file") -> DocumentSummary:
        """Run the full pipeline on a file path or URL."""
        print("📄 正在解析文档...")
        if source_type == "url":
            text = await self.parser.parse_url(source)
            title = await self._extract_title_from_url(text)
        else:
            if source.endswith(".pdf"):
                text = await self.parser.parse_pdf(source)
            else:
                text = await self.parser.parse_text(source)
            title = Path(source).stem
        word_count = len(text)
        # Rough estimate: ~400 Chinese characters per minute, at least 1 minute.
        reading_time = max(1, word_count // 400)
        chunks = self.chunker.chunk(text)
        print(f"✂️ 已切分为 {len(chunks)} 个片段")
        chunk_summaries = await self._summarize_chunks(chunks)
        final_summary = await self._merge_summaries(chunk_summaries, title)
        key_points = await self._extract_key_points(final_summary)
        return DocumentSummary(
            title=title,
            summary=final_summary,
            key_points=key_points,
            reading_time=reading_time,
            word_count=word_count,
            created_at=datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
        )

    async def _summarize_chunks(self, chunks: List[str]) -> List[str]:
        """Summarize all chunks concurrently, capped by a semaphore."""
        semaphore = asyncio.Semaphore(5)

        async def summarize_chunk(chunk: str, index: int) -> str:
            async with semaphore:
                prompt = f"请总结以下文本片段的核心内容:\n\n{chunk}"
                response = await self.llm.chat([
                    Message(role="system", content="你是一个专业的文档总结助手。"),
                    Message(role="user", content=prompt),
                ])
                print(f"✅ 片段 {index + 1}/{len(chunks)} 总结完成")
                return response

        tasks = [summarize_chunk(chunk, i) for i, chunk in enumerate(chunks)]
        return await asyncio.gather(*tasks)

    async def _merge_summaries(self, summaries: List[str], title: str) -> str:
        """Merge per-chunk summaries into one coherent document summary."""
        combined = "\n\n".join([s for s in summaries])
        prompt = f"以下是文档《{title}》各部分的摘要,请合并为一篇连贯完整的总结:\n\n{combined}"
        response = await self.llm.chat([
            Message(role="system", content="你是一个专业的文档总结助手。"),
            Message(role="user", content=prompt),
        ])
        return response

    async def _extract_key_points(self, summary: str) -> List[str]:
        """Pull a list of key points (one per line) out of the summary."""
        prompt = f"请从以下总结中提取 3-5 个关键要点,每行一个:\n\n{summary}"
        response = await self.llm.chat([Message(role="user", content=prompt)])
        return [line.strip() for line in response.split("\n") if line.strip()]

    async def _extract_title_from_url(self, text: str) -> str:
        """Ask the LLM to produce a short title for scraped page text."""
        prompt = f"请为以下网页内容生成一个简短的标题:\n\n{text[:500]}"
        response = await self.llm.chat([Message(role="user", content=prompt)])
        return response.strip()
async def main_summarizer():
    """Demo: summarize a sample document and print the report.

    NOTE(review): the sample source path and print strings were lost in
    extraction; placeholders below.
    """
    llm = LLMClient()
    summarizer = DocumentSummarizer(llm)
    result = await summarizer.summarize(source="example.pdf", source_type="file")
    print("\n" + "=" * 60)
    print(f"📄 {result.title}")
    print(f"字数:{result.word_count}")
    print(f"预计阅读时间:{result.reading_time} 分钟")
    print("\n🔑 关键要点:")
    for point in result.key_points:
        print(f"  - {point}")
    print(f"\n📝 总结:\n{result.summary}")


if __name__ == "__main__":
    asyncio.run(main_summarizer())
| 文档类型 | 原始阅读时间 | AI 总结时间 | 效率提升 |
|---|---|---|---|
| 论文(30 页) | 60 分钟 | 30 秒 | 120 倍 |
| 技术文档 | 20 分钟 | 15 秒 | 80 倍 |
| 新闻文章 | 5 分钟 | 10 秒 | 30 倍 |
| 行业报告 | 45 分钟 | 25 秒 | 108 倍 |
import re
import subprocess
import tempfile
from typing import Dict, List, Optional, Tuple
from enum import Enum
import ast
class CodeMode(Enum):
    """Supported code-assistant operation modes."""
    GENERATE = "generate"
    EXPLAIN = "explain"
    OPTIMIZE = "optimize"
    DEBUG = "debug"
    TEST = "test"
@dataclass
class CodeResult:
    """Result of one code-generation request."""
    code: str
    language: str
    explanation: str
    tests: Optional[str] = None  # generated pytest code, GENERATE mode only
    # Fixed annotation: the default is None, so the type must be Optional.
    warnings: Optional[List[str]] = None  # static-check findings, if any
class CodeGenerator:
    """AI code generator supporting generate / explain / optimize / debug / test."""

    def __init__(self, llm_client: LLMClient):
        self.llm = llm_client
        # Regex patterns flagged by the static quality check.
        self.quality_rules = {
            "security": [r"eval\s*\(", r"exec\s*\(", r"pickle\.loads?"],
            "performance": [r"for\s+\w+\s+in\s+range\(len\("],
        }

    async def generate(self, requirement: str, mode: CodeMode = CodeMode.GENERATE,
                       language: str = "python", context: str = "") -> CodeResult:
        """Build the prompt for `mode`, query the LLM, and post-process.

        Returns a CodeResult with parsed code, explanation, static-check
        warnings, and (in GENERATE mode) auto-generated tests.
        """
        mode_prompts = {
            CodeMode.GENERATE: self._build_generate_prompt,
            CodeMode.EXPLAIN: self._build_explain_prompt,
            CodeMode.OPTIMIZE: self._build_optimize_prompt,
            CodeMode.DEBUG: self._build_debug_prompt,
            CodeMode.TEST: self._build_test_prompt,
        }
        prompt_builder = mode_prompts[mode]
        prompt = prompt_builder(requirement, language, context)
        response = await self.llm.chat([
            Message(role="system", content=self._get_system_prompt(language)),
            Message(role="user", content=prompt),
        ])
        code, explanation = self._parse_code_response(response, language)
        warnings = self._security_check(code)
        tests = None
        if mode == CodeMode.GENERATE:
            tests = await self._generate_tests(code, language)
        return CodeResult(code=code, language=language, explanation=explanation,
                          tests=tests, warnings=warnings)

    def _get_system_prompt(self, language: str) -> str:
        """System prompt shared by all modes.

        NOTE(review): most of the original prompt text was lost in
        extraction; only the trailing lines survived.
        """
        return f"""你是一名资深的{language}工程师,生成的代码需规范、完整、可直接运行。
输出格式:
代码: [代码内容]
使用说明: [说明内容] """

    def _build_generate_prompt(self, requirement: str, language: str, context: str) -> str:
        """Prompt for generating new code, optionally with context code."""
        if context:
            return f"""请根据以下需求生成{language}代码:
需求:{requirement}
上下文代码:
{context}
请生成完整的、可直接运行的代码。"""
        return f"""请根据以下需求生成{language}代码:
需求:{requirement}
要求:
代码完整且可运行
包含必要的输入验证和错误处理
添加清晰的注释
如果是算法,注明时间复杂度 请生成代码:"""

    def _build_explain_prompt(self, code: str, language: str, context: str) -> str:
        """Prompt for explaining existing code."""
        return f"""请详细解释以下{language}代码的功能和工作原理:
{code}
请从以下几个方面解释:
整体功能概述
关键代码逻辑
使用的数据结构和算法
时间/空间复杂度
可能的改进点 详细解释:"""

    def _build_optimize_prompt(self, code: str, language: str, context: str) -> str:
        """Prompt for optimizing existing code."""
        return f"""请优化以下{language}代码:
{code}
优化目标:
提升性能
改善可读性
增强健壮性
遵循最佳实践 请给出:
优化后的代码
优化点说明 优化结果:"""

    def _build_debug_prompt(self, code: str, language: str, context: str) -> str:
        """Prompt for diagnosing and fixing buggy code; `context` may carry
        the error message."""
        return f"""请分析以下{language}代码中的问题并修复:
{code}
可能的错误信息:
{context if context else "[无]"}
请给出:
问题分析
修复后的代码
预防建议 分析结果:"""

    def _build_test_prompt(self, code: str, language: str, context: str) -> str:
        """Prompt for generating test cases for existing code."""
        return f"""请为以下{language}代码生成完整的测试用例:
{code}
测试要求:
测试函数名以 test_开头
包含正常和异常情况
使用合适的测试框架(如 pytest) 只输出测试代码:"""

    def _parse_code_response(self, response: str, language: str) -> Tuple[str, str]:
        """Split the LLM reply into (code, explanation).

        Fix: the fenced-code regex lost its backtick fences during markdown
        extraction; restored so a ```lang ... ``` block is actually matched.
        """
        code_pattern = rf"```{language}\n(.*?)```"
        code_match = re.search(code_pattern, response, re.DOTALL)
        if code_match:
            code = code_match.group(1).strip()
            explanation = response.replace(code_match.group(0), "").strip()
        else:
            # No fenced block: treat the whole reply as code.
            code = response
            explanation = "无额外说明"
        return code, explanation

    def _security_check(self, code: str) -> List[str]:
        """Static checks: flag risky patterns and verify the code parses."""
        warnings = []
        for category, patterns in self.quality_rules.items():
            for pattern in patterns:
                if re.search(pattern, code):
                    warnings.append(f"⚠️ 安全警告:检测到 {pattern} 使用")
        try:
            ast.parse(code)
        except SyntaxError as e:
            warnings.append(f"⚠️ 语法错误:{e}")
        return warnings

    async def _generate_tests(self, code: str, language: str) -> str:
        """Ask the LLM for pytest tests covering the generated code."""
        prompt = f"""为以下{language}代码编写 pytest 测试:
{code}
要求:
测试函数名以 test_开头
包含正常和异常情况
使用 pytest 断言 只输出测试代码:"""
        response = await self.llm.chat([Message(role="user", content=prompt)])
        return response

    async def execute_code(self, code: str, language: str = "python", timeout: int = 10) -> Dict:
        """Execute code in a temp file via subprocess with a timeout.

        Returns {"success", "output", "error"}. Note: this is process
        isolation only, not a real sandbox.
        """
        with tempfile.NamedTemporaryFile(mode='w', suffix=f'.{language}', delete=False) as f:
            f.write(code)
            temp_file = f.name
        try:
            result = subprocess.run(['python', temp_file], capture_output=True,
                                    text=True, timeout=timeout)
            return {
                "success": result.returncode == 0,
                "output": result.stdout,
                "error": result.stderr,
            }
        except subprocess.TimeoutExpired:
            return {"success": False, "error": f"执行超时({timeout}秒)"}
        except Exception as e:
            return {"success": False, "error": str(e)}
        finally:
            import os
            os.unlink(temp_file)
#### 交互式代码生成器
```python
class InteractiveCodeAssistant:
    """Conversational wrapper around CodeGenerator."""

    def __init__(self, llm_client: LLMClient):
        self.generator = CodeGenerator(llm_client)
        self.history: List[Dict] = []

    async def chat(self, user_input: str) -> str:
        """Detect intent, dispatch to the generator, format markdown output."""
        intent = await self._detect_intent(user_input)
        if intent == "generate":
            result = await self.generator.generate(requirement=user_input, mode=CodeMode.GENERATE)
            output = f"```python\n{result.code}\n```\n\n"
            output += f"**说明:**\n{result.explanation}\n\n"
            if result.warnings:
                output += "**安全警告:**\n" + "\n".join(result.warnings) + "\n\n"
            if result.tests:
                output += f"**测试代码:**\n```python\n{result.tests}\n```"
            return output
        elif intent == "explain":
            code = self._extract_code_from_input(user_input)
            result = await self.generator.generate(requirement=code, mode=CodeMode.EXPLAIN)
            return result.explanation
        # Robustness fix: other intents previously fell through returning None.
        return "暂不支持该操作,请尝试生成或解释代码。"

    async def _detect_intent(self, user_input: str) -> str:
        """Classify the request; falls back to "generate" on unknown output.

        NOTE(review): original prompt text lost in extraction — reconstructed.
        """
        prompt = f"""判断以下用户输入的意图,只输出一个词(generate/explain/optimize/debug):
{user_input}"""
        response = await self.generator.llm.chat([Message(role="user", content=prompt)])
        intent = response.strip().lower()
        return intent if intent in ["generate", "explain", "optimize", "debug"] else "generate"

    def _extract_code_from_input(self, user_input: str) -> str:
        """Pull the first fenced code block out of the input, else return it whole."""
        match = re.search(r"```(?:\w+)?\n(.*?)```", user_input, re.DOTALL)
        if match:
            return match.group(1).strip()
        return user_input
async def main_code_generator():
    """Demo: generate quicksort code, then ask for an explanation of one."""
    llm = LLMClient()
    assistant = InteractiveCodeAssistant(llm)
    # Example 1: generate code
    print("=" * 60)
    print("示例 1:生成快速排序代码")
    print("=" * 60)
    result = await assistant.chat("用 Python 实现一个快速排序,要求有详细注释")
    print(result)
    # Example 2: explain code
    code = """def quicksort(arr):
    if len(arr) <= 1:
        return arr
    pivot = arr[len(arr) // 2]
    left = [x for x in arr if x < pivot]
    middle = [x for x in arr if x == pivot]
    right = [x for x in arr if x > pivot]
    return quicksort(left) + middle + quicksort(right)"""
    explanation = await assistant.chat(f"解释这段代码在做什么:\n```\n{code}\n```")
    print(explanation)


if __name__ == "__main__":
    asyncio.run(main_code_generator())
| 功能 | ChatGPT 网页版 | 本地 AI 工具 | 优势 |
|---|---|---|---|
| 生成速度 | 3-5 秒 | 2-3 秒 | 快 40% |
| 代码可运行率 | 85% | 90%+ | 自定义优化 |
| 安全检查 | ❌ | ✅ | 内置规则 |
| 测试生成 | 需额外要求 | 自动生成 | 一站式 |
| 批量处理 | ❌ | ✅ | 脚本化 |
| 成本 | $20/月 | ¥10/月 | 省 60% |
graph TB
A[用户提问] --> B[问题分析]
B --> C{问题类型?}
C -->|事实查询| D[搜索引擎]
C -->|API 文档| E[官方文档库]
C -->|StackOverflow| F[SO 搜索]
C -->|综合查询| G[多源并行搜索]
D --> H[结果提取]
E --> H
F --> H
G --> H
H --> I[内容清洗]
I --> J[相关性排序]
J --> K[AI 总结整合]
K --> L[结构化输出]
L --> M[直接答案]
L --> N[参考链接]
L --> O[相关推荐]
import aiohttp
from typing import List, Dict, Optional
from dataclasses import dataclass
import re
from urllib.parse import quote, urljoin
import json
@dataclass
class SearchResult:
    """One hit returned by a search backend."""
    title: str
    url: str
    snippet: str
    source: str  # backend label, e.g. bing / duckduckgo / docs / stackoverflow
    relevance: float = 0.0  # raised when full page content is fetched
@dataclass
class ResearchResult:
    """Aggregated answer produced by the research pipeline."""
    answer: str
    sources: List[SearchResult]
    related_questions: List[str]
    confidence: float  # heuristic score in [0, 1]
class SearchEngine:
    """Search backends: Bing Web Search API with DuckDuckGo HTML fallback.

    NOTE(review): endpoint URLs, query params and CSS selectors were lost in
    extraction; the values below follow the public Bing v7 API and
    DuckDuckGo's html endpoint conventions — confirm against the original.
    """

    def __init__(self, bing_api_key: str = None):
        self.bing_api_key = bing_api_key or os.getenv("BING_SEARCH_API_KEY")
        self.headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"}

    async def search_bing(self, query: str, count: int = 5) -> List[SearchResult]:
        """Query Bing; silently fall back to DuckDuckGo when no key is set."""
        if not self.bing_api_key:
            return await self._search_duckduckgo(query, count)
        url = "https://api.bing.microsoft.com/v7.0/search"
        params = {"q": query, "count": count, "mkt": "zh-CN"}
        async with aiohttp.ClientSession() as session:
            async with session.get(url, params=params,
                                   headers={"Ocp-Apim-Subscription-Key": self.bing_api_key}) as response:
                data = await response.json()
        results = []
        for item in data.get("webPages", {}).get("value", []):
            results.append(SearchResult(title=item["name"], url=item["url"],
                                        snippet=item["snippet"], source="bing"))
        return results

    async def _search_duckduckgo(self, query: str, count: int = 5) -> List[SearchResult]:
        """Scrape DuckDuckGo's HTML endpoint (no API key required)."""
        url = f"https://html.duckduckgo.com/html/?q={quote(query)}"
        async with aiohttp.ClientSession() as session:
            async with session.get(url, headers=self.headers) as response:
                html = await response.text()
        from bs4 import BeautifulSoup
        soup = BeautifulSoup(html, "html.parser")
        results = []
        for result in soup.select(".result")[:count]:
            title_elem = result.select_one(".result__title")
            snippet_elem = result.select_one(".result__snippet")
            url_elem = result.select_one(".result__a")
            if title_elem and url_elem:
                results.append(SearchResult(
                    title=title_elem.get_text(),
                    url=url_elem.get("href", ""),
                    snippet=snippet_elem.get_text() if snippet_elem else "",
                    source="duckduckgo",
                ))
        return results

    async def search_stackoverflow(self, query: str, count: int = 5) -> List[SearchResult]:
        """Site-scoped search over stackoverflow.com."""
        search_query = f"site:stackoverflow.com {query}"
        results = await self._search_duckduckgo(search_query, count)
        for r in results:
            r.source = "stackoverflow"
        return results

    async def search_docs(self, query: str, docs_url: str, count: int = 5) -> List[SearchResult]:
        """Site-scoped search over an official documentation domain."""
        search_query = f"site:{docs_url} {query}"
        results = await self._search_duckduckgo(search_query, count)
        for r in results:
            r.source = "docs"
        return results
class IntelligentResearcher:
    """Multi-source research assistant: search in parallel, optionally fetch
    full pages, then synthesize an answer with the LLM.

    NOTE(review): prompts and several numeric constants were lost in
    extraction and are reconstructions — confirm wording/values.
    """

    def __init__(self, llm_client: LLMClient, search_engine: SearchEngine):
        self.llm = llm_client
        self.search = search_engine

    async def research(self, question: str, depth: int = 1,
                       sources: List[str] = None) -> ResearchResult:
        """Research a question across the requested sources."""
        print(f"🔍 正在研究:{question}")
        if sources is None:
            sources = ["google", "stackoverflow"]
        search_tasks = []
        if "google" in sources:
            search_tasks.append(self.search.search_bing(question))
        if "stackoverflow" in sources:
            search_tasks.append(self.search.search_stackoverflow(question))
        if self._is_technical_question(question):
            tech = await self._detect_tech_stack(question)
            if tech:
                docs_url = self._get_docs_url(tech)
                search_tasks.append(self.search.search_docs(question, docs_url))
        search_results_list = await asyncio.gather(*search_tasks)
        all_results = []
        for results in search_results_list:
            all_results.extend(results)
        if depth > 1:
            # Deep mode: replace snippets with full page contents for the top hits.
            all_results = await self._fetch_page_contents(all_results[:5])
        answer = await self._synthesize_answer(question, all_results)
        related = await self._generate_related_questions(question, answer)
        confidence = self._calculate_confidence(all_results)
        return ResearchResult(answer=answer, sources=all_results[:10],
                              related_questions=related, confidence=confidence)

    def _is_technical_question(self, question: str) -> bool:
        """Cheap keyword heuristic for 'is this a programming question?'."""
        tech_keywords = ["python", "java", "javascript", "react", "vue",
                         "docker", "sql", "api", "git"]
        return any(kw in question.lower() for kw in tech_keywords)

    async def _detect_tech_stack(self, question: str) -> Optional[str]:
        """Ask the LLM for the main technology; None when unrecognized."""
        prompt = f"从下面的问题中识别主要技术栈,只输出一个小写英文单词:\n{question}"
        response = await self.llm.chat([Message(role="user", content=prompt)])
        tech = response.strip().lower()
        tech_docs = {"python": "docs.python.org", "javascript": "developer.mozilla.org",
                     "react": "react.dev", "vue": "vuejs.org",
                     "docker": "docs.docker.com", "java": "docs.oracle.com"}
        # Return the tech name (not the URL) so _get_docs_url stays the single
        # place that maps tech -> docs domain.
        return tech if tech in tech_docs else None

    def _get_docs_url(self, tech: str) -> str:
        """Map a technology name to its official documentation domain."""
        tech_docs = {"python": "docs.python.org", "javascript": "developer.mozilla.org",
                     "react": "react.dev", "vue": "vuejs.org", "docker": "docs.docker.com"}
        return tech_docs.get(tech, "docs.python.org")

    async def _fetch_page_contents(self, results: List[SearchResult]) -> List[SearchResult]:
        """Best-effort: replace each snippet with cleaned full-page text."""
        async def fetch_content(result: SearchResult) -> None:
            try:
                async with aiohttp.ClientSession() as session:
                    async with session.get(result.url, headers=self.search.headers,
                                           timeout=aiohttp.ClientTimeout(total=10)) as response:
                        html = await response.text()
                from bs4 import BeautifulSoup
                soup = BeautifulSoup(html, "html.parser")
                for script in soup(["script", "style", "nav", "footer"]):
                    script.decompose()
                text = soup.get_text(separator="\n", strip=True)
                result.snippet = text[:2000] + "..."
                result.relevance = 1.0
            except Exception as e:
                # Keep the original snippet on any fetch failure.
                print(f"⚠️ 抓取失败 {result.url}: {e}")

        tasks = [fetch_content(r) for r in results]
        await asyncio.gather(*tasks)
        return results

    async def _synthesize_answer(self, question: str, results: List[SearchResult]) -> str:
        """Combine the top results into one grounded answer."""
        context = "\n\n".join([f"[{i + 1}] {r.title}\n{r.snippet}"
                               for i, r in enumerate(results[:8])])
        prompt = f"请基于以下搜索结果回答问题:\n\n问题:{question}\n\n搜索结果:\n{context}"
        answer = await self.llm.chat([
            Message(role="system", content="你是一个严谨的研究助手,回答需基于给定资料。"),
            Message(role="user", content=prompt),
        ])
        return answer

    async def _generate_related_questions(self, question: str, answer: str) -> List[str]:
        """Suggest up to three follow-up questions, one per '-' prefixed line."""
        prompt = f"根据问题「{question}」和以下回答,提出 3 个相关的延伸问题,每行一个,以 - 开头:\n{answer}"
        response = await self.llm.chat([Message(role="user", content=prompt)])
        return [line.strip() for line in response.split("\n")
                if line.strip() and line.startswith("-")][:3]

    def _calculate_confidence(self, results: List[SearchResult]) -> float:
        """Heuristic confidence from result count, boosted by official docs."""
        if not results:
            return 0.0
        base_confidence = min(0.7, len(results) / 10)
        has_docs = any(r.source == "docs" for r in results)
        if has_docs:
            base_confidence = min(0.9, base_confidence + 0.2)
        return round(base_confidence, 2)
async def main_researcher():
    """Demo: research one question and print the structured report.

    NOTE(review): the demo question and print strings were lost in
    extraction; placeholders below.
    """
    llm = LLMClient()
    search = SearchEngine()
    researcher = IntelligentResearcher(llm, search)
    result = await researcher.research(question="Python asyncio 的最佳实践是什么?", depth=2)
    print("\n" + "=" * 60)
    print("📊 研究报告")
    print("=" * 60)
    print(f"置信度:{result.confidence * 100:.0f}%")
    print(f"\n{result.answer}\n")
    print("🔗 参考来源:")
    for i, source in enumerate(result.sources, 1):
        print(f"  [{i}] {source.title}")
        print(f"      {source.url}")
    print("\n❓ 相关问题:")
    for q in result.related_questions:
        print(f"  - {q}")


if __name__ == "__main__":
    asyncio.run(main_researcher())
| 操作 | 手动搜索 | AI 助手 | 效率提升 |
|---|---|---|---|
| 单源查询 | 3 分钟 | 10 秒 | 18 倍 |
| 多源对比 | 15 分钟 | 30 秒 | 30 倍 |
| 技术文档查询 | 8 分钟 | 15 秒 | 32 倍 |
| 深度研究 | 1 小时 + | 2 分钟 | 30 倍 + |
import argparse
import asyncio
from pathlib import Path
import json
class AIToolsCLI:
    """Command-line front end for the three AI tools."""

    def __init__(self):
        self.llm = LLMClient()
        self.summarizer = DocumentSummarizer(self.llm)
        self.code_assistant = InteractiveCodeAssistant(self.llm)
        self.researcher = IntelligentResearcher(self.llm, SearchEngine())

    async def run(self):
        """Parse argv and dispatch to the matching sub-command."""
        parser = argparse.ArgumentParser(
            description="AI 工具集 - 你的智能助手",
            formatter_class=argparse.RawDescriptionHelpFormatter,
            epilog="""示例:
  # 总结文档
  python ai_tools.py summarize paper.pdf
  # 生成代码
  python ai_tools.py code "用 Python 写一个爬虫"
  # 研究问题
  python ai_tools.py research "量子计算的原理\"""")
        subparsers = parser.add_subparsers(dest='command', help='可用命令')
        # summarize command
        sum_parser = subparsers.add_parser('summarize', help='总结文档')
        sum_parser.add_argument('file', help='文件路径或 URL')
        sum_parser.add_argument('-t', '--type', default='file', choices=['file', 'url'], help='输入类型')
        sum_parser.add_argument('-o', '--output', help='输出文件路径')
        # code command
        code_parser = subparsers.add_parser('code', help='生成/处理代码')
        code_parser.add_argument('prompt', help='需求或代码')
        code_parser.add_argument('-m', '--mode', choices=['generate', 'explain', 'optimize', 'debug'],
                                 default='generate', help='处理模式')
        code_parser.add_argument('-l', '--language', default='python', help='编程语言')
        code_parser.add_argument('-x', '--execute', action='store_true', help='执行生成的代码')
        # research command
        res_parser = subparsers.add_parser('research', help='研究问题')
        res_parser.add_argument('question', help='研究问题')
        res_parser.add_argument('-d', '--depth', type=int, default=1, choices=[1, 2, 3], help='研究深度')
        res_parser.add_argument('-s', '--sources', nargs='+',
                                choices=['google', 'docs', 'stackoverflow'], help='指定搜索源')
        args = parser.parse_args()
        if not args.command:
            parser.print_help()
            return
        if args.command == 'summarize':
            await self._cmd_summarize(args)
        elif args.command == 'code':
            await self._cmd_code(args)
        elif args.command == 'research':
            await self._cmd_research(args)

    async def _cmd_summarize(self, args):
        """Summarize a file/URL; print or save a markdown report.

        NOTE(review): the original handler bodies below this point were
        garbled together in extraction; reconstructed from the surviving
        report-template fragments.
        """
        print(f"📖 正在总结:{args.file}")
        result = await self.summarizer.summarize(source=args.file, source_type=args.type)
        key_points = "\n".join(f"{i + 1}. {p}" for i, p in enumerate(result.key_points))
        report = f"""# {result.title}

**📊 统计信息**
- 字数:{result.word_count}
- 预计阅读时间:{result.reading_time} 分钟
- 生成时间:{result.created_at}

**🔑 关键要点**
{key_points}

**📝 总结**
{result.summary}
"""
        if args.output:
            Path(args.output).write_text(report, encoding="utf-8")
            print(f"✅ 已保存到:{args.output}")
        else:
            print(report)

    async def _cmd_code(self, args):
        """Generate / explain / optimize / debug code from the CLI."""
        result = await self.code_assistant.generator.generate(
            requirement=args.prompt, mode=CodeMode(args.mode), language=args.language)
        print(f"```{result.language}\n{result.code}\n```")
        print(f"\n说明:{result.explanation}")
        if result.warnings:
            for w in result.warnings:
                print(w)
        if args.execute and result.language == "python":
            run = await self.code_assistant.generator.execute_code(result.code)
            if run["success"]:
                print(f"✅ 运行输出:\n{run['output']}")
            else:
                print(f"❌ 运行失败:{run['error']}")

    async def _cmd_research(self, args):
        """Research a question; print answer, sources and follow-ups."""
        result = await self.researcher.research(question=args.question,
                                                depth=args.depth, sources=args.sources)
        print(f"**📊 置信度**: {result.confidence * 100:.0f}%\n")
        print(result.answer)
        print("\n🔗 参考来源:")
        for i, source in enumerate(result.sources, 1):
            print(f"  [{i}] {source.title}")
            print(f"      {source.url}")
        if result.related_questions:
            print("\n❓ 相关问题:")
            for q in result.related_questions:
                print(f"  - {q}")
async def main():
    """CLI entry point."""
    cli = AIToolsCLI()
    await cli.run()


if __name__ == "__main__":
    asyncio.run(main())
# 总结论文
python ai_tools.py summarize research_paper.pdf -o summary.md
# 生成代码并执行
python ai_tools.py code "用 Python 写一个二分查找" -x
# 解释代码
python ai_tools.py code "explain this code: `def foo(): return 1`" -m explain
# 深度研究
python ai_tools.py research "RAG 和 Fine-tuning 的区别" -d 2
| 使用场景 | 月调用量 | 月成本 | 对比 ChatGPT Plus |
|---|---|---|---|
| 轻度使用 | 10 万 tokens | ¥5 | 省 75% |
| 中度使用 | 100 万 tokens | ¥50 | 省 60% |
| 重度使用 | 1000 万 tokens | ¥500 | 省 40% |
ai-tools/
├── src/
│ ├── __init__.py
│ ├── llm.py # LLM 客户端
│ ├── summarizer.py # 文档总结器
│ ├── code_generator.py # 代码生成器
│ └── researcher.py # 研究助手
├── cli.py # 命令行入口
├── config.py # 配置管理
├── requirements.txt # 依赖列表
├── .env.example # 环境变量示例
├── README.md # 使用文档
└── examples/
├── example_summarize.py
├── example_code.py
└── example_research.py
# Dockerfile
FROM python:3.11-slim
WORKDIR /app
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt
COPY src/ ./src/
COPY cli.py .
COPY config.py .
ENV PYTHONPATH=/app
CMD ["python", "cli.py", "--help"]
# docker-compose.yml
version: '3.8'
services:
ai-tools:
build: .
env_file:
- .env
volumes:
- ./data:/app/data
ports:
- "8000:8000"
| 功能方向 | 实现方式 | 难度 |
|---|---|---|
| Web 界面 | FastAPI + Vue3 | ⭐⭐⭐ |
| 多模态支持 | GPT-4V 处理图片 | ⭐⭐ |
| 语音交互 | Whisper + TTS | ⭐⭐⭐ |
| 本地模型 | Ollama + Llama3 | ⭐⭐⭐⭐ |
| Agent 能力 | 添加工具调用 | ⭐⭐⭐⭐ |
通过这篇文章,我们用 Python 打造了三个强大的 AI 工具:
| 工具 | 核心价值 | 适用场景 |
|---|---|---|
| 智能文档总结器 | 10 秒读完 100 页 | 论文研读、报告分析 |
| AI 代码生成器 | 说人话写代码 | 快速原型、学习参考 |
| 智能资料助手 | 秒速精准检索 | 技术调研、问题解决 |

微信公众号「极客日志」,在微信中扫描左侧二维码关注。展示文案:极客日志 zeeklog
使用加密算法(如AES、TripleDES、Rabbit或RC4)加密和解密文本明文。 在线工具,加密/解密文本在线工具,online
生成新的随机RSA私钥和公钥pem证书。 在线工具,RSA密钥对生成器在线工具,online
基于 Mermaid.js 实时预览流程图、时序图等图表,支持源码编辑与即时渲染。 在线工具,Mermaid 预览与可视化编辑在线工具,online
解析常见 curl 参数并生成 fetch、axios、PHP curl 或 Python requests 示例代码。 在线工具,curl 转代码在线工具,online
将字符串编码和解码为其 Base64 格式表示形式即可。 在线工具,Base64 字符串编码/解码在线工具,online
将字符串、文件或图像转换为其 Base64 表示形式。 在线工具,Base64 文件转换器在线工具,online