#!/usr/bin/env python3
"""
|
|
Quick chimera assembler: Copy source GGUF header/metadata intact,
|
|
replace tensor data from organ directory.
|
|
"""
import struct, sys, os, json
def main():
|
|
if len(sys.argv) < 4:
|
|
print("Usage: quick_chimera.py <source.gguf> <organs_dir> <output.gguf>")
|
|
sys.exit(1)
|
|
|
|
source_gguf = sys.argv[1]
|
|
organs_dir = sys.argv[2]
|
|
output_gguf = sys.argv[3]
|
|
|
|
f = open(source_gguf, "rb")
|
|
magic = struct.unpack("<I", f.read(4))[0]
|
|
version = struct.unpack("<I", f.read(4))[0]
|
|
n_tensors = struct.unpack("<Q", f.read(8))[0]
|
|
n_metadata = struct.unpack("<Q", f.read(8))[0]
|
|
|
|
print(f"Source: {os.path.basename(source_gguf)}")
|
|
print(f" Version: {version}, Tensors: {n_tensors}, Metadata: {n_metadata}")
|
|
|
|
def read_string():
|
|
slen = struct.unpack("<Q", f.read(8))[0]
|
|
return f.read(slen).decode("utf-8")
|
|
|
|
def skip_value(vtype):
|
|
sizes = {0:1, 1:1, 2:2, 3:2, 4:4, 5:4, 6:4, 7:1, 10:8, 11:8, 12:8}
|
|
if vtype in sizes:
|
|
f.read(sizes[vtype])
|
|
elif vtype == 8:
|
|
read_string()
|
|
elif vtype == 9:
|
|
arr_type = struct.unpack("<I", f.read(4))[0]
|
|
arr_len = struct.unpack("<Q", f.read(8))[0]
|
|
for _ in range(arr_len):
|
|
skip_value(arr_type)
|
|
else:
|
|
raise ValueError(f"Unknown type: {vtype}")
|
|
|
|
for _ in range(n_metadata):
|
|
read_string()
|
|
vtype = struct.unpack("<I", f.read(4))[0]
|
|
skip_value(vtype)
|
|
|
|
tensor_info = []
|
|
for _ in range(n_tensors):
|
|
name = read_string()
|
|
n_dims = struct.unpack("<I", f.read(4))[0]
|
|
dims = [struct.unpack("<Q", f.read(8))[0] for _ in range(n_dims)]
|
|
dtype = struct.unpack("<I", f.read(4))[0]
|
|
offset = struct.unpack("<Q", f.read(8))[0]
|
|
tensor_info.append({"name": name, "dims": dims, "dtype": dtype, "offset": offset})
|
|
|
|
pos = f.tell()
|
|
padding = (32 - (pos % 32)) % 32
|
|
f.read(padding)
|
|
data_start = f.tell()
|
|
|
|
print(f" Header: {data_start} bytes, Data start: {data_start}")
|
|
|
|
# Copy full header
|
|
f.seek(0)
|
|
header = f.read(data_start)
|
|
|
|
# Build organ map
|
|
organ_map = {}
|
|
for category in ["skeleton", "organs", "embed", "norm", "adapters", "unknown"]:
|
|
cat_dir = os.path.join(organs_dir, category)
|
|
if os.path.isdir(cat_dir):
|
|
for fname in os.listdir(cat_dir):
|
|
if fname.endswith(".bin"):
|
|
tname = fname[:-4]
|
|
organ_map[tname] = os.path.join(cat_dir, fname)
|
|
|
|
print(f" Organ files: {len(organ_map)}")
|
|
|
|
# Write output
|
|
out = open(output_gguf, "wb")
|
|
out.write(header)
|
|
|
|
written = 0
|
|
fallback = 0
|
|
for ti in tensor_info:
|
|
name = ti["name"]
|
|
safe_name = name.replace(".", "_")
|
|
organ_path = organ_map.get(safe_name)
|
|
|
|
if organ_path and os.path.exists(organ_path):
|
|
data = open(organ_path, "rb").read()
|
|
out.write(data)
|
|
written += 1
|
|
else:
|
|
# Fallback: read from source GGUF
|
|
# Calculate tensor size
|
|
next_idx = tensor_info.index(ti) + 1
|
|
if next_idx < len(tensor_info):
|
|
size = tensor_info[next_idx]["offset"] - ti["offset"]
|
|
else:
|
|
f.seek(0, 2)
|
|
file_end = f.tell()
|
|
size = file_end - (data_start + ti["offset"])
|
|
|
|
f.seek(data_start + ti["offset"])
|
|
data = f.read(size)
|
|
out.write(data)
|
|
fallback += 1
|
|
|
|
out.close()
|
|
f.close()
|
|
|
|
final_size = os.path.getsize(output_gguf)
|
|
print(f"\n Output: {output_gguf}")
|
|
print(f" Size: {final_size / (1024**3):.2f} GB")
|
|
print(f" From organs: {written}, From source: {fallback}, Total: {written+fallback}/{n_tensors}")
if __name__ == "__main__":
|
|
main()
# ╔══ SALKA ELMADANI AUTHORSHIP CERTIFICATE ══╗
# © Salka Elmadani 2025-2026 — ALL RIGHTS RESERVED
# Licensed under Business Source License 1.1 — https://inference-x.com
# ─────────────────────────────────────────────────────────
# SHA256: b0d040908eddc26078e86f76e361825fada5c2676778789ef41c1804730eb10d
# SIG-ED25519: srq6F3EyKqi7r3nlB6cfI1u53J1GpsC2ty9zNsBDrZ2EldVVIhE1mWCdnd/qkvgif783DOlLQ4Zb2CCw13XfBQ==
# VERIFY: python3 verify_authorship.py quick_chimera.py