162 lines
5.7 KiB
Python
Executable File
162 lines
5.7 KiB
Python
Executable File
#!/usr/bin/env python3
|
|
"""
|
|
Quick chimera assembler v2: FIXED organ header handling.
|
|
Organ .bin files have: [name_len(4) + name + n_dims(4) + dims(8*n) + dtype(4)] + DATA
|
|
We must skip the header and only copy the DATA portion.
|
|
CSCI v1.0 — Cross-Scale Coherence Index
|
|
"""
|
|
import struct, sys, os, json
|
|
|
|
def read_organ_header(f):
    """Advance *f* past an organ .bin header and return the payload offset.

    Header layout: name_len (u32 LE) + name bytes + n_dims (u32 LE)
    + n_dims * u64 dims + dtype (u32 LE).  The tensor payload starts
    immediately after, at the position this function returns.
    """
    (name_bytes,) = struct.unpack("<I", f.read(4))
    f.read(name_bytes)  # tensor name — not needed here
    (dim_count,) = struct.unpack("<I", f.read(4))
    f.read(8 * dim_count + 4)  # dims (u64 each) followed by dtype (u32)
    return f.tell()
|
|
|
|
def main():
    """Assemble an output GGUF by splicing organ payloads over a source GGUF.

    Copies the source GGUF header verbatim, then for each tensor writes either
    the matching organ .bin payload (organ header stripped, size verified) or,
    when no organ exists or its size disagrees, the original bytes from the
    source file.  Reads <source.gguf>, <organs_dir>, <output.gguf> from argv.
    """
    if len(sys.argv) < 4:
        print("Usage: quick_chimera_v2.py <source.gguf> <organs_dir> <output.gguf>")
        sys.exit(1)

    source_gguf = sys.argv[1]
    organs_dir = sys.argv[2]
    output_gguf = sys.argv[3]

    # FIX: use context managers — the originals were opened with bare open()
    # and leaked on any exception raised while parsing.
    with open(source_gguf, "rb") as f:
        magic = struct.unpack("<I", f.read(4))[0]
        # FIX: magic was read but never validated; a non-GGUF file would be
        # silently misparsed.  b"GGUF" little-endian == 0x46554747.
        if magic != 0x46554747:
            print(f"ERROR: bad GGUF magic 0x{magic:08X} in {source_gguf}")
            sys.exit(1)
        version = struct.unpack("<I", f.read(4))[0]
        n_tensors = struct.unpack("<Q", f.read(8))[0]
        n_metadata = struct.unpack("<Q", f.read(8))[0]

        print(f"Source: {os.path.basename(source_gguf)}")
        print(f" Version: {version}, Tensors: {n_tensors}, Metadata: {n_metadata}")

        def read_string():
            """Read a GGUF string: u64 length + UTF-8 bytes."""
            slen = struct.unpack("<Q", f.read(8))[0]
            return f.read(slen).decode("utf-8")

        def skip_value(vtype):
            """Skip one GGUF metadata value of the given type code."""
            # Fixed-width scalar types -> byte widths (u8/i8, u16/i16,
            # u32/i32/f32, bool, u64/i64/f64).
            sizes = {0: 1, 1: 1, 2: 2, 3: 2, 4: 4, 5: 4, 6: 4, 7: 1, 10: 8, 11: 8, 12: 8}
            if vtype in sizes:
                f.read(sizes[vtype])
            elif vtype == 8:  # string
                read_string()
            elif vtype == 9:  # array: element type (u32) + count (u64) + elements
                arr_type = struct.unpack("<I", f.read(4))[0]
                arr_len = struct.unpack("<Q", f.read(8))[0]
                for _ in range(arr_len):
                    skip_value(arr_type)
            else:
                raise ValueError(f"Unknown type: {vtype}")

        # Skip all metadata key/value pairs — only the tensor table matters here.
        for _ in range(n_metadata):
            read_string()  # key, discarded
            vtype = struct.unpack("<I", f.read(4))[0]
            skip_value(vtype)

        # Tensor table: name + dims + dtype + offset (relative to data section).
        tensor_info = []
        for _ in range(n_tensors):
            name = read_string()
            n_dims = struct.unpack("<I", f.read(4))[0]
            dims = [struct.unpack("<Q", f.read(8))[0] for _ in range(n_dims)]
            dtype = struct.unpack("<I", f.read(4))[0]
            offset = struct.unpack("<Q", f.read(8))[0]
            tensor_info.append({"name": name, "dims": dims, "dtype": dtype, "offset": offset})

        # NOTE(review): alignment is hard-coded to 32; GGUF permits a
        # "general.alignment" metadata override — confirm source files use 32.
        pos = f.tell()
        padding = (32 - (pos % 32)) % 32
        f.read(padding)
        data_start = f.tell()

        print(f" Header: {data_start} bytes")

        # Copy full header intact
        f.seek(0)
        header = f.read(data_start)

        # Build organ map: sanitized tensor name ('.' -> '_') -> .bin path.
        organ_map = {}
        for category in ["skeleton", "organs", "embed", "norm", "adapters", "unknown"]:
            cat_dir = os.path.join(organs_dir, category)
            if os.path.isdir(cat_dir):
                for fname in os.listdir(cat_dir):
                    if fname.endswith(".bin"):
                        tname = fname[:-4]
                        organ_map[tname] = os.path.join(cat_dir, fname)

        print(f" Organ files: {len(organ_map)}")

        from_organ = 0
        from_source = 0

        with open(output_gguf, "wb") as out:
            out.write(header)

            for i, ti in enumerate(tensor_info):
                name = ti["name"]
                safe_name = name.replace(".", "_")
                organ_path = organ_map.get(safe_name)

                # Expected tensor byte size, inferred from consecutive table
                # offsets; the last tensor runs to end of file.  Assumes the
                # table is in ascending offset order (true for llama.cpp output).
                if i + 1 < len(tensor_info):
                    expected_size = tensor_info[i + 1]["offset"] - ti["offset"]
                else:
                    f.seek(0, 2)
                    file_end = f.tell()
                    expected_size = file_end - (data_start + ti["offset"])

                data = None
                if organ_path and os.path.exists(organ_path):
                    # Read organ file, SKIP HEADER, only take the payload.
                    with open(organ_path, "rb") as organ_f:
                        read_organ_header(organ_f)
                        candidate = organ_f.read()
                    # Only accept the organ payload if its size matches exactly.
                    if len(candidate) == expected_size:
                        data = candidate

                if data is not None:
                    out.write(data)
                    from_organ += 1
                else:
                    # Fallback (missing organ or size mismatch): copy the
                    # original bytes from the source GGUF.  (This path was
                    # duplicated verbatim in two branches; merged, same behavior.)
                    f.seek(data_start + ti["offset"])
                    out.write(f.read(expected_size))
                    from_source += 1

    final_size = os.path.getsize(output_gguf)
    source_size = os.path.getsize(source_gguf)
    print(f"\n Output: {output_gguf}")
    print(f" Size: {final_size / (1024**3):.2f} GB (source: {source_size / (1024**3):.2f} GB)")
    print(f" From organs: {from_organ}, From source: {from_source}, Total: {from_organ+from_source}/{n_tensors}")

    # Integrity check: sizes should match
    if abs(final_size - source_size) < 1024:
        print(f" INTEGRITY: ✓ PASS (size match)")
    else:
        diff = final_size - source_size
        print(f" INTEGRITY: ✗ MISMATCH ({diff:+d} bytes)")
|
|
|
|
|
|
# Script entry point: run only when executed directly, not when imported.
if __name__ == "__main__":
    main()
|
|
# ╔══ SALKA ELMADANI AUTHORSHIP CERTIFICATE ══╗
|
|
# © Salka Elmadani 2025-2026 — ALL RIGHTS RESERVED
|
|
# Licensed under Business Source License 1.1 — https://inference-x.com
|
|
# ─────────────────────────────────────────────────────────
|
|
# SHA256: 6587e64dbf1c6fe2160fe8f2e25a33e6ed5e98193baea7f7523a9495e04b9154
|
|
# SIG-ED25519: TrwO40O2Qn0ysnadlzX38fBTSOF5St11SyZTSc4cZP/7k5HM+ifnqDMTu/vkZWDYAdmb+5bc6IhpYYQgVdLsBA==
|
|
# VERIFY: python3 verify_authorship.py quick_chimera_v2.py
|