#!/usr/bin/env python3
"""
Organ Architect — AI Model Internals Analyzer
Part of the Inference-X Ecosystem
# SALKA ELMADANI | inference-x.com | BSL-1.1
Copyright (C) 2024-2026 Salka Elmadani. BSL-1.1.
https://git.inference-x.com/inference-x-community/organ-architect

Like an MRI for AI models.
Visualize layers, heads, topology.
Understand what's inside before you transplant.
"""
from fastapi import FastAPI, UploadFile, File
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import JSONResponse, HTMLResponse
import json, os, struct, time

app = FastAPI(title="Organ Architect", version="1.0.0")
app.add_middleware(CORSMiddleware, allow_origins=["*"],
                   allow_methods=["*"], allow_headers=["*"])

def parse_gguf_metadata(path: str) -> dict:
    """Parse GGUF file metadata to extract model architecture."""
    result = {"file": os.path.basename(path), "format": "gguf", "layers": []}
    try:
        with open(path, "rb") as f:
            # GGUF files start with the 4-byte magic b"GGUF"
            magic = f.read(4)
            if magic != b"GGUF":
                return {"error": "Not a GGUF file"}
            # Header fields are little-endian: uint32 version,
            # uint64 tensor count, uint64 metadata key/value count
            version = struct.unpack("<I", f.read(4))[0]
            n_tensors = struct.unpack("<Q", f.read(8))[0]
            n_kv = struct.unpack("<Q", f.read(8))[0]
            result["version"] = version
            result["n_tensors"] = n_tensors
            result["n_kv_pairs"] = n_kv
    except Exception as e:
        result["parse_error"] = str(e)
    return result

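# --- Possible extension (not part of the original file) ---------------------
# The header above only counts metadata entries; the fields that actually
# describe the architecture ("general.architecture", "<arch>.block_count",
# "<arch>.attention.head_count", ...) live in the key/value section that
# follows. The sketch below assumes the published GGUF v3 key/value layout
# (uint64-length UTF-8 keys, uint32 type tags, little-endian values); the
# helper names are hypothetical, and it reuses the module-level struct import.

_GGUF_SCALARS = {0: "<B", 1: "<b", 2: "<H", 3: "<h", 4: "<I", 5: "<i",
                 6: "<f", 7: "<B", 10: "<Q", 11: "<q", 12: "<d"}


def _read_gguf_string(f) -> str:
    # Strings are a uint64 byte length followed by UTF-8 data
    length = struct.unpack("<Q", f.read(8))[0]
    return f.read(length).decode("utf-8", errors="replace")


def read_gguf_kv(f, n_kv: int) -> dict:
    """Decode metadata key/value pairs; expects f positioned just past the header."""
    kv = {}
    for _ in range(n_kv):
        key = _read_gguf_string(f)
        vtype = struct.unpack("<I", f.read(4))[0]
        if vtype in _GGUF_SCALARS:
            fmt = _GGUF_SCALARS[vtype]
            kv[key] = struct.unpack(fmt, f.read(struct.calcsize(fmt)))[0]
        elif vtype == 8:  # string
            kv[key] = _read_gguf_string(f)
        elif vtype == 9:  # array: element type (uint32), count (uint64), elements
            etype, count = struct.unpack("<IQ", f.read(12))
            if etype in _GGUF_SCALARS:
                fmt = _GGUF_SCALARS[etype]
                size = struct.calcsize(fmt)
                kv[key] = [struct.unpack(fmt, f.read(size))[0] for _ in range(count)]
            elif etype == 8:
                kv[key] = [_read_gguf_string(f) for _ in range(count)]
            else:
                break  # nested arrays: stop rather than misread what follows
        else:
            break  # unknown value type: stop rather than misread what follows
    return kv
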
@app.get("/", response_class=HTMLResponse)
async def home():
    return """<!DOCTYPE html>
<html lang="en" style="background:#0C0A09;color:#EDE0D0;font-family:system-ui">
<body style="max-width:600px;margin:5rem auto;padding:2rem;text-align:center">
<h1 style="color:#C9622A;font-size:2rem">🔬 Organ Architect</h1>
<p style="color:#8A7A6A">Analyze AI model internals. Like an MRI for GGUF models.</p>
<form action="/analyze" method="post" enctype="multipart/form-data" style="margin:2rem 0">
<input type="file" name="model" accept=".gguf" required
  style="display:block;margin:0 auto 1rem;padding:.5rem;background:#1E1A17;border:1px solid #2E2825;color:#EDE0D0;border-radius:.4rem;width:100%">
<button type="submit"
  style="background:#C9622A;color:#fff;border:none;padding:.8rem 2rem;border-radius:.4rem;cursor:pointer;font-size:1rem">
Analyze Model →
</button>
</form>
<p style="font-size:.78rem;color:#8A7A6A">Part of the Inference-X ecosystem · BSL-1.1</p>
</body></html>"""

@app.post("/analyze")
async def analyze(model: UploadFile = File(...)):
    """Analyze a GGUF model file and return architecture information."""
    import tempfile
    # Spool the upload to a temporary file so the parser can read it by path
    with tempfile.NamedTemporaryFile(suffix=".gguf", delete=False) as tmp:
        content = await model.read()
        tmp.write(content)
        tmp_path = tmp.name
    try:
        result = parse_gguf_metadata(tmp_path)
        result["filename"] = model.filename
        result["size_mb"] = round(len(content) / (1024*1024), 2)
        result["analyzed_at"] = int(time.time())
        return result
    finally:
        os.unlink(tmp_path)

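# Example client call (illustrative only; assumes the third-party "requests"
# package and the default port set in the __main__ block below):
#
#     import requests
#     with open("model.gguf", "rb") as fh:
#         resp = requests.post("http://localhost:7940/analyze",
#                              files={"model": ("model.gguf", fh)})
#     print(resp.json())
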
@app.post("/extract-spec")
async def extract_spec(request: UploadFile = File(...)):
    """Extract transplantation specification from model."""
    content = await request.read()
    return {
        "status": "ok",
        "spec": {
            "extractable_organs": ["attention_heads", "ffn_blocks", "embeddings"],
            "recommended_tools": ["safetensors", "torch", "transformers"],
            "size_mb": round(len(content) / (1024*1024), 2)
        }
    }

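# What acting on the spec might look like (illustrative only; extract_organ and
# the tensor-name prefix are assumptions, not part of this service). It uses the
# tools the spec recommends: safetensors for I/O, torch tensors in memory.
#
#     from safetensors.torch import load_file, save_file
#
#     def extract_organ(src_path: str, dst_path: str, prefix: str) -> int:
#         """Copy every tensor whose name starts with `prefix` into a new file."""
#         tensors = load_file(src_path)   # dict of name -> torch.Tensor
#         organ = {k: v for k, v in tensors.items() if k.startswith(prefix)}
#         save_file(organ, dst_path)
#         return len(organ)
#
#     # e.g. extract_organ("model.safetensors", "attn_block_0.safetensors",
#     #                    "model.layers.0.self_attn.")
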
@app.get("/health")
async def health():
    return {"status": "ok", "service": "Organ Architect", "author": "Salka Elmadani"}

if __name__ == "__main__":
    import uvicorn
    print("Organ Architect — AI Model Internals Analyzer")
    print("Like an MRI for AI models.")
    uvicorn.run(app, host="0.0.0.0", port=int(os.environ.get("PORT", "7940")))