moved to full client

This commit is contained in:
Andreas Wilms
2026-03-09 16:12:01 +01:00
parent 8a1f133e50
commit 7293839a19
5 changed files with 149 additions and 676 deletions

View File

@@ -1,92 +0,0 @@
import { spawn } from "child_process";
import { writeFile, readFile, mkdir, unlink } from "fs/promises";
import { NextRequest, NextResponse } from "next/server";
import path from "path";
import os from "os";
/**
 * POST /api/convert — converts an uploaded XGrids binary (.lci / environment.bin)
 * to PLY by spawning the matching Python converter script.
 *
 * Returns the generated .ply as an octet-stream attachment, or a JSON error.
 */
export async function POST(req: NextRequest) {
  try {
    const formData = await req.formData();
    const file = formData.get("file") as File;
    if (!file) {
      return NextResponse.json({ error: "No file provided" }, { status: 400 });
    }

    // Stage the upload in a private temp directory.
    const tempDir = path.join(os.tmpdir(), "xgrids-pipeline");
    await mkdir(tempDir, { recursive: true });

    // Sanitize the client-supplied name so it cannot escape tempDir.
    const safeName = file.name.replace(/[^a-z0-9.]/gi, "_").toLowerCase();
    const timestamp = Date.now();
    const inputPath = path.join(tempDir, `${timestamp}_${safeName}`);
    const outputPath = inputPath.replace(/\.(lcc|lci|bin)$/i, ".ply");

    // DETERMINE WHICH SCRIPT TO RUN
    let scriptName = "convert_lci_to_ply.py";
    if (file.name.toLowerCase().includes("environment.bin")) {
      scriptName = "convert_env_to_ply.py";
    }
    const scriptPath = path.join(
      process.cwd(),
      "scripts",
      "preprocess",
      scriptName,
    );

    const buffer = Buffer.from(await file.arrayBuffer());
    await writeFile(inputPath, buffer);

    // Remove both temp files, ignoring "not found" errors.
    const cleanup = () =>
      Promise.all([
        unlink(inputPath).catch(() => {}),
        unlink(outputPath).catch(() => {}),
      ]);

    // Header-safe download name: strip the extension, then any characters
    // that could break out of the quoted Content-Disposition value.
    const downloadName = file.name
      .replace(/\.[^/.]+$/, "")
      .replace(/["\r\n\\]/g, "_");

    return new Promise<NextResponse>((resolve) => {
      const pythonProcess = spawn("python3", [
        scriptPath,
        inputPath,
        outputPath,
      ]);

      let errorOutput = "";
      pythonProcess.stderr.on("data", (data) => {
        errorOutput += data.toString();
      });

      // BUG FIX: without an "error" handler a failed spawn (e.g. python3
      // missing from PATH) never emits "close", so the Promise would never
      // resolve and the request would hang forever.
      pythonProcess.on("error", async (err) => {
        await cleanup();
        resolve(
          NextResponse.json(
            { error: `Failed to start python3: ${err.message}` },
            { status: 500 },
          ),
        );
      });

      pythonProcess.on("close", async (code) => {
        if (code !== 0) {
          // BUG FIX: also remove a partially written output file, not just
          // the input, on script failure.
          await cleanup();
          return resolve(
            NextResponse.json(
              { error: `Python failed (${scriptName}): ${errorOutput}` },
              { status: 500 },
            ),
          );
        }
        try {
          const plyBuffer = await readFile(outputPath);
          await cleanup();
          resolve(
            new NextResponse(plyBuffer, {
              status: 200,
              headers: {
                "Content-Type": "application/octet-stream",
                "Content-Disposition": `attachment; filename="${downloadName}.ply"`,
              },
            }),
          );
        } catch (e) {
          await cleanup();
          resolve(
            NextResponse.json(
              { error: "Failed to read output PLY" },
              { status: 500 },
            ),
          );
        }
      });
    });
  } catch (error: unknown) {
    // Narrow the unknown before touching .message (strict TS).
    const message = error instanceof Error ? error.message : String(error);
    return NextResponse.json({ error: message }, { status: 500 });
  }
}

View File

@@ -49,42 +49,10 @@ export default function XgridsWizard() {
const startPipeline = async () => { const startPipeline = async () => {
if (!files.length || !workerRef.current) return; if (!files.length || !workerRef.current) return;
// FIND SPECIFIC ENTRIES
const lciFile = files.find((f) => f.name.toLowerCase() === "collision.lci");
const lccFile = files.find((f) => f.name.toLowerCase().endsWith(".lcc"));
if (!lciFile) {
setStatus("Error: 'collision.lci' not found in folder.");
return;
}
if (!lccFile) {
setStatus("Error: Main '.lcc' scene file not found.");
return;
}
setIsProcessing(true); setIsProcessing(true);
setStatus("Preparing files for Worker...");
try { const mainLcc = files.find((f) => f.name.toLowerCase().endsWith(".lcc"));
// --- PHASE 1: Python (Only collision.lci) ---
setStatus("Step 1/2: Converting collision.lci to PLY...");
const formData = new FormData();
formData.append("file", lciFile);
const pyResponse = await fetch("/api/convert", {
method: "POST",
body: formData,
});
if (!pyResponse.ok) {
const err = await pyResponse.json();
throw new Error(err.error || "Python script failed");
}
const plyBlob = await pyResponse.blob();
downloadFile(plyBlob, "collision_mesh.ply");
// --- PHASE 2: Worker (The whole folder context) ---
setStatus("Step 2/2: Generating Splats and LODs from .lcc...");
const filesData = await Promise.all( const filesData = await Promise.all(
files.map(async (f) => ({ files.map(async (f) => ({
@@ -93,22 +61,17 @@ export default function XgridsWizard() {
})), })),
); );
const buffersToTransfer = filesData.map((f) => f.buffer); // Send EVERYTHING to the worker.
// The worker will now handle Collision, Environment, SOG, and LODs.
workerRef.current.postMessage( workerRef.current.postMessage(
{ {
type: "START_CONVERSION", type: "START_CONVERSION",
filesData, filesData,
mainLccName: lccFile.name, mainLccName: mainLcc?.name,
fileName: lccFile.name.replace(".lcc", ""), fileName: mainLcc?.name.replace(".lcc", ""),
}, },
buffersToTransfer, filesData.map((f) => f.buffer),
); );
} catch (error: any) {
console.error(error);
setStatus(`Error: ${error.message}`);
setIsProcessing(false);
}
}; };
return ( return (

View File

@@ -1,56 +1,117 @@
/* eslint-disable no-restricted-globals */
/**
 * Converts an XGrids .lci collision binary into an ASCII PLY mesh Blob.
 * Vertex indices from all meshes are rebased onto one global vertex list.
 */
function parseLci(buffer: ArrayBuffer): Blob {
  const view = new DataView(buffer);
  const LCI_MAGIC = 0x6c6c6f63; // 'coll'
  if (view.getUint32(0, true) !== LCI_MAGIC) {
    throw new Error("Invalid LCI Magic Number");
  }

  // Mesh directory: count at byte 44, then one 40-byte header per mesh.
  const meshCount = view.getUint32(44, true);
  const headers = Array.from({ length: meshCount }, (_, i) => {
    const base = 48 + i * 40;
    return {
      dataOffset: Number(view.getBigUint64(base + 8, true)),
      vertexNum: view.getUint32(base + 24, true),
      faceNum: view.getUint32(base + 28, true),
    };
  });

  const vertexLines: string[] = [];
  const faceLines: string[] = [];
  let baseIndex = 0;
  let totalFaces = 0;
  for (const h of headers) {
    let cursor = h.dataOffset;
    // Vertices: 3 little-endian float32 each.
    for (let v = 0; v < h.vertexNum; v++) {
      vertexLines.push(
        `${view.getFloat32(cursor, true).toFixed(6)} ${view.getFloat32(cursor + 4, true).toFixed(6)} ${view.getFloat32(cursor + 8, true).toFixed(6)}\n`,
      );
      cursor += 12;
    }
    // Faces: 3 uint32 indices each, rebased to the global vertex list.
    for (let f = 0; f < h.faceNum; f++) {
      faceLines.push(
        `3 ${view.getUint32(cursor, true) + baseIndex} ${view.getUint32(cursor + 4, true) + baseIndex} ${view.getUint32(cursor + 8, true) + baseIndex}\n`,
      );
      cursor += 12;
    }
    baseIndex += h.vertexNum;
    totalFaces += h.faceNum;
  }

  const header =
    `ply\nformat ascii 1.0\nelement vertex ${baseIndex}\n` +
    `property float x\nproperty float y\nproperty float z\n` +
    `element face ${totalFaces}\n` +
    `property list uchar int vertex_indices\nend_header\n`;
  return new Blob([header + vertexLines.join("") + faceLines.join("")], {
    type: "application/octet-stream",
  });
}
/**
 * Converts XGrids environment.bin (44-byte records of 11 little-endian
 * float32: position, scale, rotation quaternion, opacity) into an ASCII
 * Gaussian-splat PLY Blob for PlayCanvas.
 */
function parseEnvironment(buffer: ArrayBuffer): Blob {
  const view = new DataView(buffer);
  const STRIDE = 44; // 11 float32 per point
  const count = Math.floor(buffer.byteLength / STRIDE);

  const parts: string[] = [
    `ply\nformat ascii 1.0\nelement vertex ${count}\nproperty float x\nproperty float y\nproperty float z\nproperty float scale_0\nproperty float scale_1\nproperty float scale_2\nproperty float rot_0\nproperty float rot_1\nproperty float rot_2\nproperty float rot_3\nproperty float opacity\nend_header\n`,
  ];
  for (let p = 0; p < count; p++) {
    const base = p * STRIDE;
    const fields: string[] = [];
    for (let f = 0; f < 11; f++) {
      fields.push(view.getFloat32(base + f * 4, true).toFixed(6));
    }
    parts.push(fields.join(" ") + "\n");
  }
  return new Blob([parts.join("")], { type: "application/octet-stream" });
}
// --- WORKER INFRASTRUCTURE ---
const originalFetch = globalThis.fetch; const originalFetch = globalThis.fetch;
globalThis.fetch = async (input, init) => { globalThis.fetch = async (input, init) => {
const url = input instanceof Request ? input.url : input.toString(); const url = input instanceof Request ? input.url : input.toString();
self.postMessage({ type: "LOG", message: `FETCH: ${url}` }); if (url.includes("webp.wasm"))
return originalFetch("/workers/webp.wasm", init);
if (url.includes("webp.wasm")) {
self.postMessage({
type: "LOG",
message: `INTERCEPTED → /workers/webp.wasm`,
});
const res = await originalFetch("/workers/webp.wasm", init);
self.postMessage({
type: "LOG",
message: `WASM response status: ${res.status}`,
});
return res;
}
return originalFetch(input, init); return originalFetch(input, init);
}; };
// Intercept XMLHttpRequest (Emscripten uses this in Workers) so that any
// request targeting webp.wasm is redirected to our locally hosted copy.
if (typeof XMLHttpRequest !== "undefined") {
  const nativeOpen = XMLHttpRequest.prototype.open;
  // @ts-ignore
  XMLHttpRequest.prototype.open = function (
    method: string,
    url: string | URL,
    ...rest: any[]
  ) {
    const target =
      typeof url === "string" && url.includes("webp.wasm")
        ? "/workers/webp.wasm"
        : url;
    return nativeOpen.apply(this, [method, target, ...rest] as any);
  };
}
self.onmessage = async (e: MessageEvent) => { self.onmessage = async (e: MessageEvent) => {
const { type, filesData, mainLccName, fileName } = e.data; const { type, filesData, mainLccName, fileName } = e.data;
if (type === "START_CONVERSION") { if (type === "START_CONVERSION") {
try { try {
self.postMessage({ type: "LOG", message: "Initialisiere..." }); const generatedFiles: { name: string; blob: Blob }[] = [];
// Emscripten's native locateFile hook // 1. Process LCI (Collision)
// @ts-ignore const lciData = filesData.find((f: any) => f.name === "collision.lci");
globalThis.Module = globalThis.Module || {}; if (lciData) {
// @ts-ignore self.postMessage({ type: "LOG", message: "Parsing Collision Mesh..." });
globalThis.Module.locateFile = function (path: string) { generatedFiles.push({
if (path.endsWith(".wasm")) { name: "collision_mesh.ply",
return new URL("/webp.wasm", self.location.origin).href; blob: parseLci(lciData.buffer),
});
} }
return path;
}; // 2. Process Environment (Point Cloud)
const envData = filesData.find((f: any) => f.name === "environment.bin");
if (envData) {
self.postMessage({
type: "LOG",
message: "Parsing Environment Cloud...",
});
generatedFiles.push({
name: "environment_reference.ply",
blob: parseEnvironment(envData.buffer),
});
}
// 3. Process Splat Transformation (SOG / LODs)
self.postMessage({
type: "LOG",
message: "Initializing PlayCanvas Splat Transform...",
});
const { const {
readFile, readFile,
@@ -61,122 +122,78 @@ self.onmessage = async (e: MessageEvent) => {
} = await import("@playcanvas/splat-transform"); } = await import("@playcanvas/splat-transform");
const readFs = new MemoryReadFileSystem(); const readFs = new MemoryReadFileSystem();
self.postMessage({
type: "LOG",
message: "Lade Dateien in den virtuellen Speicher...",
});
for (const file of filesData) { for (const file of filesData) {
readFs.set(file.name, new Uint8Array(file.buffer)); readFs.set(file.name, new Uint8Array(file.buffer));
} }
const readOptions = { const commonOptions = {
iterations: 0, iterations: 10,
lodSelect: [0, 1, 2, 3, 4], // we have captured a total level of 5 lodSelect: [0, 1, 2, 3, 4],
unbundled: false, unbundled: false,
lodChunkCount: 0, lodChunkCount: 0,
lodChunkExtent: 0, lodChunkExtent: 0,
}; };
self.postMessage({ type: "LOG", message: "Lese LCC und Binärdaten..." });
const tables = await readFile({ const tables = await readFile({
filename: mainLccName, filename: mainLccName,
fileSystem: readFs, fileSystem: readFs,
inputFormat: getInputFormat(mainLccName), inputFormat: getInputFormat(mainLccName),
params: [], params: [],
options: readOptions, options: { ...commonOptions, iterations: 0 },
}); });
const mainTable = tables[0]; const mainTable = tables[0];
if (!mainTable) throw new Error("Keine Splat-Daten gefunden."); if (!mainTable) throw new Error("No Splat data found.");
const generatedFiles: { name: string; blob: Blob }[] = [];
// PASS 1: Generate Single High-Quality SOG
self.postMessage({ type: "LOG", message: "Kompiliere Single SOG..." });
// PASS: Single SOG
self.postMessage({ type: "LOG", message: "Compiling High-Res SOG..." });
const writeFsSingle = new MemoryFileSystem(); const writeFsSingle = new MemoryFileSystem();
const singleOutputName = `${fileName}.sog`;
const singleOptions = {
...readOptions,
iterations: 10,
unbundled: false,
};
await writeFile( await writeFile(
{ {
filename: singleOutputName, filename: `${fileName}.sog`,
outputFormat: "sog-bundle", outputFormat: "sog-bundle",
dataTable: mainTable, dataTable: mainTable,
options: singleOptions, options: commonOptions,
}, },
writeFsSingle, writeFsSingle,
); );
const singleSogData = writeFsSingle.results.get(singleOutputName); const singleData = writeFsSingle.results.get(`${fileName}.sog`);
if (singleSogData) { if (singleData) {
generatedFiles.push({ generatedFiles.push({
name: singleOutputName, name: `${fileName}.sog`,
blob: new Blob([new Uint8Array(singleSogData).slice().buffer], { blob: new Blob([new Uint8Array(singleData).buffer]),
type: "application/octet-stream",
}),
}); });
} }
// ========================================== // PASS: LOD Chunks
// PASS 2: Generate Unbundled LOD SOGs + JSON self.postMessage({ type: "LOG", message: "Compiling LOD Chunks..." });
// ==========================================
self.postMessage({ type: "LOG", message: "Kompiliere LOD Chunks..." });
const writeFsLods = new MemoryFileSystem(); const writeFsLods = new MemoryFileSystem();
await writeFile(
// MUST be exactly "meta.json" for unbundled SOG format {
const lodsOutputName = "meta.json"; filename: "meta.json",
outputFormat: "sog",
const lodOptions = { dataTable: mainTable,
...readOptions, options: {
iterations: 10, ...commonOptions,
unbundled: true, unbundled: true,
lodChunkCount: 512, lodChunkCount: 512,
lodChunkExtent: 16, lodChunkExtent: 16,
}; },
await writeFile(
{
filename: lodsOutputName,
outputFormat: "sog",
dataTable: mainTable,
options: lodOptions,
}, },
writeFsLods, writeFsLods,
); );
// Jetzt iterieren wir über alle generierten Dateien im System for (const [name, data] of writeFsLods.results.entries()) {
for (const [generatedName, data] of writeFsLods.results.entries()) {
const mimeType = generatedName.endsWith(".json")
? "application/json"
: "application/octet-stream";
generatedFiles.push({ generatedFiles.push({
name: generatedName, name,
blob: new Blob([new Uint8Array(data).slice().buffer], { blob: new Blob([new Uint8Array(data).buffer]),
type: mimeType,
}),
}); });
} }
// Send all Data to Frontend self.postMessage({ type: "DONE", data: { files: generatedFiles } });
self.postMessage({
type: "DONE",
data: {
files: generatedFiles,
},
});
} catch (err: any) { } catch (err: any) {
self.postMessage({ type: "LOG", message: `Fehler: ${err.message}` }); self.postMessage({ type: "LOG", message: `Error: ${err.message}` });
console.error(err);
} }
} }
}; };

View File

@@ -1,90 +0,0 @@
#!/usr/bin/env python3
import argparse
import struct
import sys
from pathlib import Path
def convert_env_to_ply(input_path, output_path, verbose=False):
    """
    Convert an Xgrids environment.bin point cloud to an ASCII PLY file.

    Binary layout: 44 bytes per splat (11 little-endian float32 values);
    only the first three (x, y, z) are written to the PLY output.

    Args:
        input_path: Path to the environment.bin input file.
        output_path: Path of the .ply file to create.
        verbose: If True, print progress information to stdout.

    Exits the process with status 1 on failure.  All error text goes to
    stderr so a parent process that captures stderr (e.g. the web API
    route that spawns this script) can surface the message.
    """
    input_path = Path(input_path)
    output_path = Path(output_path)

    if not input_path.exists():
        # BUG FIX: errors were previously printed to stdout, but the caller
        # (/api/convert) only captures stderr for error reporting.
        print(f"✗ Error: File '{input_path}' not found.", file=sys.stderr)
        sys.exit(1)

    # 44 bytes per point (Position x,y,z | Scale x,y,z | Rotation q1..q4 | Opacity)
    POINT_SIZE = 44

    try:
        file_size = input_path.stat().st_size
        num_points = file_size // POINT_SIZE

        if verbose:
            print("-" * 50)
            print(f"Input: {input_path}")
            print(f"Output: {output_path}")
            print(f"Size: {file_size / (1024*1024):.2f} MB")
            print(f"Points: {num_points:,}")
            print("-" * 50)

        with open(input_path, "rb") as f_in, open(output_path, "w") as f_out:
            # 1. Write PLY header (positions only — splat attributes are dropped).
            f_out.write("ply\n")
            f_out.write("format ascii 1.0\n")
            f_out.write(f"element vertex {num_points}\n")
            f_out.write("property float x\n")
            f_out.write("property float y\n")
            f_out.write("property float z\n")
            f_out.write("end_header\n")

            # 2. Process binary in chunks to keep RAM usage low.
            chunk_size = 10000
            points_processed = 0

            while points_processed < num_points:
                remaining = num_points - points_processed
                batch_size = min(chunk_size, remaining)

                chunk_data = f_in.read(batch_size * POINT_SIZE)
                if not chunk_data:
                    break  # truncated file: stop early rather than crash

                for i in range(batch_size):
                    offset = i * POINT_SIZE
                    # Only the first 12 bytes (3 floats: x, y, z) of each
                    # 44-byte record are unpacked; the rest is skipped.
                    x, y, z = struct.unpack_from("<fff", chunk_data, offset)
                    f_out.write(f"{x:.6f} {y:.6f} {z:.6f}\n")

                points_processed += batch_size
                if verbose and points_processed % 50000 == 0:
                    print(f"• Progress: {points_processed:,} / {num_points:,}")

        print(f"✓ Success! Converted {num_points:,} points.")

    except Exception as e:
        # BUG FIX: route the failure to stderr (see note above).
        print(f"✗ Error during conversion: {e}", file=sys.stderr)
        sys.exit(1)
if __name__ == "__main__":
    # CLI entry point for the zero-dependency environment.bin converter.
    cli = argparse.ArgumentParser(
        description="Zero-dependency CLI tool to convert Xgrids environment.bin to PLY."
    )
    cli.add_argument("input", help="Path to environment.bin")
    cli.add_argument("output", nargs="?", help="Output .ply path")
    cli.add_argument("-v", "--verbose", action="store_true", help="Enable logging")
    ns = cli.parse_args()

    # Default output: same name as the input, with a .ply suffix.
    target = Path(ns.output) if ns.output else Path(ns.input).with_suffix(".ply")
    convert_env_to_ply(ns.input, target, ns.verbose)

View File

@@ -1,325 +0,0 @@
#!/usr/bin/env python3
"""
XGrids LCI to PLY Converter
Converts XGrids .lci collision files to standard .ply format
Usage:
# Convert with default output name (collision_mesh.ply)
python convert_lci_to_ply.py path/to/collision.lci
# Specify custom output file
python convert_lci_to_ply.py path/to/collision.lci output/mesh.ply
# Verbose output
python convert_lci_to_ply.py path/to/collision.lci -v
"""
import argparse
import struct
import sys
from pathlib import Path
# Constants for the LCI binary container format
LCI_MAGIC = 0x6C6C6F63  # ASCII 'coll' interpreted as a little-endian uint32
MESH_HEADER_SIZE = 40  # bytes per entry in the mesh header table (starts at offset 48)
def read_lci_file(filepath, verbose=False):
    """
    Parse XGrids .lci collision file according to official LCI specification

    File layout (all values little-endian):
      - 48-byte main header: magic, version, header length, bounding box,
        cell lengths, mesh count
      - mesh_num x 40-byte mesh headers, starting at byte 48
      - per-mesh payload at each header's data offset: vertices
        (3 x float32 each), faces (3 x uint32 each), then BVH data
        (accounted for in size checks but not parsed)

    Args:
        filepath: Path to input .lci file
        verbose: Enable verbose output

    Returns:
        Tuple of (vertices, faces) where:
        - vertices: List of (x, y, z) tuples
        - faces: List of (v0, v1, v2) tuples with vertex indices
          (indices are rebased onto the combined global vertex list)

    Raises:
        FileNotFoundError: if the input file does not exist
        ValueError: on any structural inconsistency (bad magic, header or
            data size mismatch, out-of-range face indices, ...)
    """
    filepath = Path(filepath)

    if not filepath.exists():
        raise FileNotFoundError(f"Input file not found: {filepath}")

    # The entire file is read into memory; the verbose warning below flags
    # files where this may be costly.
    with open(filepath, "rb") as f:
        lci_data = f.read()

    # Validate minimum file size
    if len(lci_data) < 48:
        raise ValueError("File too small to be valid LCI format (minimum 48 bytes required)")

    # Warn about large files that may cause memory issues
    file_size_mb = len(lci_data) / (1024 * 1024)
    if file_size_mb > 100 and verbose:
        print(f"Warning: Large file ({file_size_mb:.1f} MB) loaded into memory")

    # Read main header
    magic = struct.unpack("<I", lci_data[0:4])[0]
    if magic != LCI_MAGIC:
        raise ValueError(
            f"Invalid file format. Expected magic 0x{LCI_MAGIC:08X}, got 0x{magic:08X}"
        )

    version = struct.unpack("<I", lci_data[4:8])[0]
    header_len = struct.unpack("<I", lci_data[8:12])[0]
    min_x, min_y, min_z = struct.unpack("<fff", lci_data[12:24])
    max_x, max_y, max_z = struct.unpack("<fff", lci_data[24:36])
    cell_length_x = struct.unpack("<f", lci_data[36:40])[0]
    cell_length_y = struct.unpack("<f", lci_data[40:44])[0]
    mesh_num = struct.unpack("<I", lci_data[44:48])[0]

    # Validate mesh count
    if mesh_num == 0:
        raise ValueError("LCI file contains no mesh data")

    # Validate header length: main header (48) plus one 40-byte entry per mesh
    expected_header = 48 + mesh_num * MESH_HEADER_SIZE
    if header_len != expected_header:
        raise ValueError(
            f"Header length mismatch: expected {expected_header}, got {header_len}"
        )

    if verbose:
        print("LCI File Information:")
        print(f" Version: {version}")
        print(f" Header length: {header_len} bytes")
        print(
            f" Bounding box: min({min_x:.2f}, {min_y:.2f}, {min_z:.2f}) "
            f"max({max_x:.2f}, {max_y:.2f}, {max_z:.2f})"
        )
        print(f" Cell length: X={cell_length_x:.2f}, Y={cell_length_y:.2f}")
        print(f" Mesh count: {mesh_num}\n")

    # Read mesh headers (40 bytes each, starting at offset 48)
    meshes = []
    mesh_header_offset = 48

    for i in range(mesh_num):
        offset = mesh_header_offset + i * MESH_HEADER_SIZE

        # Validate buffer bounds for mesh header
        if offset + MESH_HEADER_SIZE > len(lci_data):
            raise ValueError(
                f"Mesh header {i} exceeds file size (offset {offset}, file size {len(lci_data)})"
            )

        index_x = struct.unpack("<I", lci_data[offset : offset + 4])[0]
        index_y = struct.unpack("<I", lci_data[offset + 4 : offset + 8])[0]
        data_offset = struct.unpack("<Q", lci_data[offset + 8 : offset + 16])[0]
        bytes_size = struct.unpack("<Q", lci_data[offset + 16 : offset + 24])[0]
        vertex_num = struct.unpack("<I", lci_data[offset + 24 : offset + 28])[0]
        face_num = struct.unpack("<I", lci_data[offset + 28 : offset + 32])[0]
        # NOTE(review): only bytes 0-36 of the 40-byte header are decoded;
        # bytes 36-40 appear unused here — presumably padding/reserved,
        # confirm against the LCI spec.
        bvh_size = struct.unpack("<I", lci_data[offset + 32 : offset + 36])[0]

        meshes.append(
            {
                "index_x": index_x,
                "index_y": index_y,
                "offset": data_offset,
                "bytes_size": bytes_size,
                "vertex_num": vertex_num,
                "face_num": face_num,
                "bvh_size": bvh_size,
            }
        )

        if verbose:
            print(
                f"Mesh {i}: grid({index_x},{index_y}), "
                f"{vertex_num:,} verts, {face_num:,} faces"
            )

    if verbose:
        print()

    # Read all mesh data, accumulating into one combined vertex/face list
    all_vertices = []
    all_faces = []
    global_vertex_offset = 0

    for i, mesh in enumerate(meshes):
        mesh_offset = mesh["offset"]
        vertex_num = mesh["vertex_num"]
        face_num = mesh["face_num"]
        bytes_size = mesh["bytes_size"]
        bvh_size = mesh["bvh_size"]

        # Validate mesh data bounds: vertices (12 B each) + faces (12 B each)
        # + trailing BVH block must exactly match the declared size
        expected_data_size = (vertex_num * 12) + (face_num * 12) + bvh_size
        if expected_data_size != bytes_size:
            raise ValueError(
                f"Mesh {i} data size mismatch: expected {expected_data_size} bytes, "
                f"header specifies {bytes_size} bytes"
            )

        if mesh_offset + expected_data_size > len(lci_data):
            raise ValueError(
                f"Mesh {i} data exceeds file size (offset {mesh_offset}, "
                f"data size {expected_data_size}, file size {len(lci_data)})"
            )

        # Read vertices
        vertices = []
        pos = mesh_offset
        for j in range(vertex_num):
            x, y, z = struct.unpack("<fff", lci_data[pos : pos + 12])
            vertices.append((x, y, z))
            pos += 12

        # Read faces (immediately follow the vertex block)
        faces = []
        for j in range(face_num):
            v0, v1, v2 = struct.unpack("<III", lci_data[pos : pos + 12])

            # Validate face indices against this mesh's vertex count
            if v0 >= vertex_num or v1 >= vertex_num or v2 >= vertex_num:
                raise ValueError(
                    f"Invalid face indices in mesh {i}: ({v0}, {v1}, {v2}), "
                    f"vertex_num={vertex_num}"
                )

            # Adjust indices to global vertex offset
            faces.append(
                (
                    v0 + global_vertex_offset,
                    v1 + global_vertex_offset,
                    v2 + global_vertex_offset,
                )
            )
            pos += 12

        all_vertices.extend(vertices)
        all_faces.extend(faces)
        global_vertex_offset += len(vertices)

    if verbose:
        print(f"Total: {len(all_vertices):,} vertices, {len(all_faces):,} faces")

    return all_vertices, all_faces
def write_ply_ascii(filepath, vertices, faces, verbose=False):
    """
    Write mesh data to an ASCII PLY file.

    Args:
        filepath: Path to output .ply file
        vertices: List of (x, y, z) vertex tuples
        faces: List of (v0, v1, v2) face tuples
        verbose: Enable verbose output
    """
    filepath = Path(filepath)
    # Make sure the destination directory exists before writing.
    filepath.parent.mkdir(parents=True, exist_ok=True)

    if verbose:
        print(f"\nWriting ASCII PLY to: {filepath}")

    header = "\n".join(
        [
            "ply",
            "format ascii 1.0",
            f"element vertex {len(vertices)}",
            "property float x",
            "property float y",
            "property float z",
            f"element face {len(faces)}",
            "property list uchar int vertex_indices",
            "end_header",
        ]
    )

    with open(filepath, "w") as out:
        out.write(header + "\n")
        # Vertex positions first (full %.9g precision), then triangle faces.
        for x, y, z in vertices:
            out.write(f"{x:.9g} {y:.9g} {z:.9g}\n")
        for a, b, c in faces:
            out.write(f"3 {a} {b} {c}\n")

    if verbose:
        file_size_kb = filepath.stat().st_size / 1024
        print(f" File size: {file_size_kb:.1f} KB")
def parse_args():
    """Parse command line arguments for the LCI-to-PLY converter CLI."""
    parser = argparse.ArgumentParser(
        description="Convert XGrids .lci collision files to .ply format",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
  # Convert with default output name (collision_mesh.ply in same directory)
  python convert_lci_to_ply.py collision.lci

  # Specify custom output file
  python convert_lci_to_ply.py collision.lci output/mesh.ply

  # Verbose output
  python convert_lci_to_ply.py collision.lci -v
""",
    )

    # Positional input is required; output is optional (defaulted in main()).
    parser.add_argument("input", type=Path, help="Input .lci collision file")
    parser.add_argument(
        "output",
        type=Path,
        nargs="?",
        help="Output .ply file (default: collision_mesh.ply in same directory as input)",
    )
    parser.add_argument(
        "-v", "--verbose", action="store_true", help="Enable verbose output"
    )
    return parser.parse_args()
def main():
    """Main entry point: parse args, convert LCI to PLY, report the result."""
    args = parse_args()

    # Default output: collision_mesh.ply next to the input file.
    output_path = (
        args.input.parent / "collision_mesh.ply" if args.output is None else args.output
    )

    if args.verbose:
        print("=" * 60)
        print(f"Converting: {args.input}")
        print(f"Output: {output_path}")
        print("=" * 60 + "\n")

    try:
        # Parse the binary collision file into a combined mesh.
        vertices, faces = read_lci_file(args.input, verbose=args.verbose)
        if not vertices:
            print("Error: No mesh data extracted from file", file=sys.stderr)
            sys.exit(1)

        # Emit the ASCII PLY.
        write_ply_ascii(output_path, vertices, faces, verbose=args.verbose)

        if args.verbose:
            print("\n" + "=" * 60)
            print("✓ Conversion successful!")
        else:
            print(f"✓ Converted {args.input} -> {output_path}")
        print(f" {len(vertices):,} vertices, {len(faces):,} faces")
    except Exception as e:
        print(f"\n✗ Conversion failed: {e}", file=sys.stderr)
        if args.verbose:
            import traceback

            traceback.print_exc()
        sys.exit(1)


if __name__ == "__main__":
    main()