Update README.MD and add nano-claude-code v3.0 + original-source-code/src

- README.MD: add original-source-code and nano-claude-code sections, update
  overview table (4 subprojects), add v3.0 news entry, expand comparison table
  with memory/multi-agent/skills dimensions
- nano-claude-code v3.0: multi-agent package (multi_agent/), memory package
  (memory/), skill package (skill/) with built-in /commit and /review skills,
  context compression (compaction.py), tool registry plugin system, diff view,
  17 slash commands, 18 built-in tools, 101 tests (~5000 lines total)
- original-source-code/src: add raw TypeScript source tree (1884 files)

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
Author: chauncygu
Date: 2026-04-03 10:26:29 -07:00
Parent: 3de4c595ea
Commit: 1d4ffa964d
Stats: 1942 changed files with 521644 additions and 112 deletions

View File

@@ -29,6 +29,7 @@ PROVIDERS: dict[str, dict] = {
"anthropic": {
"type": "anthropic",
"api_key_env": "ANTHROPIC_API_KEY",
"context_limit": 200000,
"models": [
"claude-opus-4-6", "claude-sonnet-4-6", "claude-haiku-4-5-20251001",
"claude-opus-4-5", "claude-sonnet-4-5",
@@ -39,6 +40,7 @@ PROVIDERS: dict[str, dict] = {
"type": "openai",
"api_key_env": "OPENAI_API_KEY",
"base_url": "https://api.openai.com/v1",
"context_limit": 128000,
"models": [
"gpt-4o", "gpt-4o-mini", "gpt-4-turbo",
"o3-mini", "o1", "o1-mini",
@@ -48,6 +50,7 @@ PROVIDERS: dict[str, dict] = {
"type": "openai",
"api_key_env": "GEMINI_API_KEY",
"base_url": "https://generativelanguage.googleapis.com/v1beta/openai/",
"context_limit": 1000000,
"models": [
"gemini-2.5-pro-preview-03-25",
"gemini-2.0-flash", "gemini-2.0-flash-lite",
@@ -58,6 +61,7 @@ PROVIDERS: dict[str, dict] = {
"type": "openai",
"api_key_env": "MOONSHOT_API_KEY",
"base_url": "https://api.moonshot.cn/v1",
"context_limit": 128000,
"models": [
"moonshot-v1-8k", "moonshot-v1-32k", "moonshot-v1-128k",
"kimi-latest",
@@ -67,6 +71,7 @@ PROVIDERS: dict[str, dict] = {
"type": "openai",
"api_key_env": "DASHSCOPE_API_KEY",
"base_url": "https://dashscope.aliyuncs.com/compatible-mode/v1",
"context_limit": 1000000,
"models": [
"qwen-max", "qwen-plus", "qwen-turbo", "qwen-long",
"qwen2.5-72b-instruct", "qwen2.5-coder-32b-instruct",
@@ -77,6 +82,7 @@ PROVIDERS: dict[str, dict] = {
"type": "openai",
"api_key_env": "ZHIPU_API_KEY",
"base_url": "https://open.bigmodel.cn/api/paas/v4/",
"context_limit": 128000,
"models": [
"glm-4-plus", "glm-4", "glm-4-flash", "glm-4-air",
"glm-z1-flash",
@@ -86,6 +92,7 @@ PROVIDERS: dict[str, dict] = {
"type": "openai",
"api_key_env": "DEEPSEEK_API_KEY",
"base_url": "https://api.deepseek.com/v1",
"context_limit": 64000,
"models": [
"deepseek-chat", "deepseek-coder", "deepseek-reasoner",
],
@@ -95,6 +102,7 @@ PROVIDERS: dict[str, dict] = {
"api_key_env": None,
"base_url": "http://localhost:11434/v1",
"api_key": "ollama",
"context_limit": 128000,
"models": [
"llama3.3", "llama3.2", "phi4", "mistral", "mixtral",
"qwen2.5-coder", "deepseek-r1", "gemma3",
@@ -105,12 +113,14 @@ PROVIDERS: dict[str, dict] = {
"api_key_env": None,
"base_url": "http://localhost:1234/v1",
"api_key": "lm-studio",
"context_limit": 128000,
"models": [], # dynamic, depends on loaded model
},
"custom": {
"type": "openai",
"api_key_env": "CUSTOM_API_KEY",
"base_url": None, # read from config["custom_base_url"]
"context_limit": 128000,
"models": [],
},
}
@@ -277,8 +287,9 @@ def messages_to_openai(messages: list) -> list:
msg: dict = {"role": "assistant", "content": m.get("content") or None}
tcs = m.get("tool_calls", [])
if tcs:
msg["tool_calls"] = [
{
msg["tool_calls"] = []
for tc in tcs:
tc_msg = {
"id": tc["id"],
"type": "function",
"function": {
@@ -286,8 +297,10 @@ def messages_to_openai(messages: list) -> list:
"arguments": json.dumps(tc["input"], ensure_ascii=False),
},
}
for tc in tcs
]
# Pass through provider-specific fields (e.g. Gemini thought_signature)
if tc.get("extra_content"):
tc_msg["extra_content"] = tc["extra_content"]
msg["tool_calls"].append(tc_msg)
result.append(msg)
elif role == "tool":
@@ -425,7 +438,7 @@ def stream_openai_compat(
for tc in delta.tool_calls:
idx = tc.index
if idx not in tool_buf:
tool_buf[idx] = {"id": "", "name": "", "args": ""}
tool_buf[idx] = {"id": "", "name": "", "args": "", "extra_content": None}
if tc.id:
tool_buf[idx]["id"] = tc.id
if tc.function:
@@ -433,6 +446,10 @@ def stream_openai_compat(
tool_buf[idx]["name"] += tc.function.name
if tc.function.arguments:
tool_buf[idx]["args"] += tc.function.arguments
# Capture extra_content (e.g. Gemini thought_signature)
extra = getattr(tc, "extra_content", None)
if extra:
tool_buf[idx]["extra_content"] = extra
# Some providers include usage in the last chunk
if hasattr(chunk, "usage") and chunk.usage:
@@ -446,7 +463,10 @@ def stream_openai_compat(
inp = json.loads(v["args"]) if v["args"] else {}
except json.JSONDecodeError:
inp = {"_raw": v["args"]}
tool_calls.append({"id": v["id"] or f"call_{idx}", "name": v["name"], "input": inp})
tc_entry = {"id": v["id"] or f"call_{idx}", "name": v["name"], "input": inp}
if v.get("extra_content"):
tc_entry["extra_content"] = v["extra_content"]
tool_calls.append(tc_entry)
yield AssistantTurn(text, tool_calls, in_tok, out_tok)