import { Vec3 } from "gl-matrix";
import BVH from "./bvh.ts";
import { createCamera, setupCameraInput, updateCamera, updateMovementInput } from "./camera.ts";
import { GLTF2, combineGLTFs } from "./gltf.ts";
import { InitPane } from "./pane.ts";
import kernel from "./shaders/main.wgsl";
import viewport from "./shaders/viewport.wgsl";
// Expose the accumulation frame counter on `window` so it is visible to
// the type checker; it is written below and reset whenever the camera moves.
declare global {
interface Window {
framecount: number;
}
}
// init device
const canvas = document.querySelector("canvas") as HTMLCanvasElement;
// Check WebGPU support BEFORE dereferencing navigator.gpu — the original
// called requestAdapter() first, which throws a raw TypeError on browsers
// without WebGPU instead of this descriptive error.
if (!navigator.gpu) {
  throw new Error("WebGPU not supported on this browser.");
}
const adapter = await navigator.gpu.requestAdapter();
if (!adapter) {
  throw new Error("No appropriate GPUAdapter found.");
}
const device = await adapter.requestDevice({
  requiredFeatures: ['timestamp-query']
});
// Clamp the drawing buffer to the device's maximum 2D texture dimension.
const width = canvas.clientWidth;
const height = canvas.clientHeight;
canvas.width = Math.max(1, Math.min(width, device.limits.maxTextureDimension2D));
canvas.height = Math.max(1, Math.min(height, device.limits.maxTextureDimension2D));
const context = canvas.getContext("webgpu") as GPUCanvasContext;
const format = navigator.gpu.getPreferredCanvasFormat();
context.configure({
  device,
  format: format,
});
// compose scene -> triangles -> BVH -> textures
// GLTF2 args: (device, url, scale, translation, rotation quaternion).
const x = new GLTF2(device, "Duck.gltf", [0.1, 0.1, 0.1], [-13, -1, -0.34], [0, 0, -1.25, 1]);
const y = new GLTF2(device, "cornell_empty_rg.gltf", [20, 20, 20], [0, 0, 0.01], [0, 0, 0, 0]);
const z = new GLTF2(device, "EnvironmentTest.gltf", [1.8, 1.8, 1.8], [0, 15, 25], [0, 0, 0, 0]);
// Load each model's geometry/textures before merging.
await x.initialize();
await y.initialize();
await z.initialize();
const t = combineGLTFs([x, y, z]);
// Build an acceleration structure over the combined triangle soup.
let ab = new BVH(t.triangles);
ab.construct();
const hasTextures = t.textures && t.textures.length > 0;
const textureCount = hasTextures ? t.textures.length : 0;
// Four floats per texture: [width, height, pad, pad] (z/w are written as 0 below).
const textureSizes = new Float32Array(textureCount * 4);
console.log(t.triangles); // debug dump of the combined scene
// Full-fat HDR viewport target (rgba32float); tonemapping happens when it
// is blitted to the swapchain.
const viewportTexture = device.createTexture({
  size: [canvas.width, canvas.height],
  format: "rgba32float",
  usage:
    GPUTextureUsage.COPY_DST |
    GPUTextureUsage.STORAGE_BINDING |
    GPUTextureUsage.TEXTURE_BINDING,
});
// Default view, bound both as the kernel's storage output and the blit input.
const viewportTextureColorBuffer = viewportTexture.createView();
// offsets for buffer data
// Each material is packed into a 64-byte record (vec4-aligned fields),
// mirroring the WGSL-side struct. byteOffset is the field's byte offset
// within one record; length is its float count. `type` is currently unused
// by the setters below.
const MaterialSize = 64;
const materialData = new Float32Array(t.materials.length * (MaterialSize / 4));
const MaterialInfo = {
albedo: { type: Float32Array, byteOffset: 0, length: 4 },
metallic: { type: Float32Array, byteOffset: 16, length: 1 },
alphaMode: { type: Float32Array, byteOffset: 20, length: 1 },
alphaCutoff: { type: Float32Array, byteOffset: 24, length: 1 },
doubleSided: { type: Float32Array, byteOffset: 28, length: 1 },
emission: { type: Float32Array, byteOffset: 32, length: 3 },
roughness: { type: Float32Array, byteOffset: 44, length: 1 },
baseColorTexture: { type: Float32Array, byteOffset: 48, length: 1 },
normalTexture: { type: Float32Array, byteOffset: 52, length: 1 },
metallicRoughnessTexture: { type: Float32Array, byteOffset: 56, length: 1 },
emissiveTexture: { type: Float32Array, byteOffset: 60, length: 1 },
};
// NB: Very fat. Trimming these to verts should be (4*3) * 3 + 12 = 48 bytes.
// At that point, if it's just 48, it might be cheaper to drop the triangle indices and skip the BVH -> tri_index lookup,
// resolving shading data via triangle indices directly. That would still need the material index though, to do alpha tests in trace().
// TODO: Trim these to only verts, the material index, and the shading index. Move the rest to shadingData[].
// Could also leave this be: skip the verts here, move them directly into the BVH, and make sure the indices line up before passing it in.
// 176-byte triangle record. Note `material` rides in normal_c's 4th slot
// (offset 92), and there is a 8-byte pad gap between uVC (ends 120) and
// tangentA (starts 128) for vec4 alignment.
const TriangleSize = 176;
const TriangleData = new Float32Array(t.triangles.length * (TriangleSize / 4));
const TriangleInfo = {
corner_a: { type: Float32Array, byteOffset: 0, length: 3 },
corner_b: { type: Float32Array, byteOffset: 16, length: 3 },
corner_c: { type: Float32Array, byteOffset: 32, length: 3 },
normal_a: { type: Float32Array, byteOffset: 48, length: 3 },
normal_b: { type: Float32Array, byteOffset: 64, length: 3 },
normal_c: { type: Float32Array, byteOffset: 80, length: 3 },
material: { type: Float32Array, byteOffset: 92, length: 1 },
uVA: { type: Float32Array, byteOffset: 96, length: 2 },
uVB: { type: Float32Array, byteOffset: 104, length: 2 },
uVC: { type: Float32Array, byteOffset: 112, length: 2 },
tangentA: { type: Float32Array, byteOffset: 128, length: 4 },
tangentB: { type: Float32Array, byteOffset: 144, length: 4 },
tangentC: { type: Float32Array, byteOffset: 160, length: 4 },
};
// init scene buffers
const triangleBuffer = device.createBuffer({
  label: "Triangle Storage",
  size: t.triangles.length * TriangleSize,
  usage: GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_DST,
});
const materialBuffer = device.createBuffer({
  label: "Material storage",
  // materialData is f32 (4 bytes/element); the previous `8 * materialData.length`
  // allocated exactly double the required size.
  size: materialData.byteLength,
  usage: GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_DST,
});
const emissiveTrianglesBuffer = device.createBuffer({
  label: "Emissive triangles",
  // Holds one f32 triangle index per emissive triangle (worst case: all of
  // them). The previous size reserved a full 176-byte triangle record per
  // index — 44x more than is ever written.
  size: 4 * t.triangles.length,
  usage: GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_DST,
});
const nodeBuffer = device.createBuffer({
  // 32 bytes per flattened BVH node: [min.xyz, left] [max.xyz, count]
  size: 32 * ab.nodesUsed,
  usage: GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_DST,
});
const triangleIndexBuffer = device.createBuffer({
  // one f32 original-triangle index per BVH primitive
  size: 4 * t.triangles.length,
  usage: GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_DST,
});
const accumulationBuffer = device.createBuffer({
  // 16 bytes (one vec4<f32>) of accumulated radiance per pixel
  size: canvas.width * canvas.height * 16,
  usage: GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_DST,
});
const uniformBuffer0 = device.createBuffer({
  label: "Camera Transform Buffer",
  size: 512, // fixed uniform block; byte offsets documented at the write sites
  usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST,
});
const textureSizeBuffer = device.createBuffer({
  // padded to 2048 bytes — presumably the WGSL side declares a fixed-size
  // uniform array; verify against main.wgsl before shrinking this
  size: Math.max(textureSizes.byteLength, 2048),
  usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST,
});
// populate buffers
const emissiveMaterialIndices: number[] = [];
const emissiveTriangleIndices: number[] = [];
const bvhPrimitiveTriangleIndices: Float32Array = new Float32Array(ab.triIdx.length);
type MaterialPropertyName = keyof typeof MaterialInfo;
// Write a vector-valued material field at the given record base (in floats).
const writeMaterialVec = (base: number, prop: MaterialPropertyName, values: number[]) => {
  materialData.set(values, base + MaterialInfo[prop].byteOffset / 4);
};
// Write a scalar material field at the given record base (in floats).
const writeMaterialScalar = (base: number, prop: MaterialPropertyName, value: number) => {
  materialData[base + MaterialInfo[prop].byteOffset / 4] = value;
};
for (let i = 0; i < t.materials.length; i++) {
  const mat = t.materials[i];
  const base = i * (MaterialSize / 4); // float offset of this material's record
  writeMaterialVec(base, "albedo", mat.baseColorFactor);
  writeMaterialScalar(base, "metallic", mat.metallicFactor);
  writeMaterialScalar(base, "alphaMode", mat.alphaMode);
  writeMaterialScalar(base, "alphaCutoff", mat.alphaCutoff);
  writeMaterialScalar(base, "doubleSided", mat.doubleSided);
  writeMaterialVec(base, "emission", mat.emissiveFactor);
  writeMaterialScalar(base, "roughness", mat.roughnessFactor);
  writeMaterialScalar(base, "baseColorTexture", mat.baseColorTexture);
  writeMaterialScalar(base, "normalTexture", mat.normalTexture);
  writeMaterialScalar(base, "metallicRoughnessTexture", mat.metallicRoughnessTexture);
  writeMaterialScalar(base, "emissiveTexture", mat.emissiveTexture);
  // Any nonzero emissive component marks this material as a light source.
  if (mat.emissiveFactor[0] !== 0 || mat.emissiveFactor[1] !== 0 || mat.emissiveFactor[2] !== 0) {
    emissiveMaterialIndices.push(i);
  }
}
device.queue.writeBuffer(materialBuffer, 0, materialData);
type TrianglePropertyName = keyof typeof TriangleInfo;
t.triangles.forEach((tri, i) => {
const triOffset = i * (TriangleSize / 4);
const setData = (propertyName: TrianglePropertyName, value: number[]) => {
const info = TriangleInfo[propertyName];
TriangleData.set(value, triOffset + info.byteOffset / 4);
};
const setFloat = (propertyName: TrianglePropertyName, value: number) => {
const info = TriangleInfo[propertyName];
TriangleData[triOffset + info.byteOffset / 4] = value;
};
setData("corner_a", tri.cornerA);
setData("corner_b", tri.cornerB);
setData("corner_c", tri.cornerC);
setData("normal_a", tri.normalA);
setData("normal_b", tri.normalB);
setData("normal_c", tri.normalC);
setFloat("material", tri.mat);
setData("uVA", tri.uvA);
setData("uVB", tri.uvB);
setData("uVC", tri.uvC);
setData("tangentA", tri.tangentA);
setData("tangentB", tri.tangentB);
setData("tangentC", tri.tangentC);
if (emissiveMaterialIndices.includes(tri.mat)) {
emissiveTriangleIndices.push(i); // Push the triangle's index
}
});
device.queue.writeBuffer(triangleBuffer, 0, TriangleData);
device.queue.writeBuffer(emissiveTrianglesBuffer, 0, new Float32Array(emissiveTriangleIndices));
// Flatten the BVH into 32-byte nodes: [min.xyz, leftChild, max.xyz, primCount].
const nodeData: Float32Array = new Float32Array(8 * ab.nodesUsed);
for (let i = 0; i < ab.nodesUsed; i++) {
  const src = i * 3; // offset into the xyz-packed min/max arrays
  const dst = i * 8; // offset into the 8-float node record
  nodeData[dst + 0] = ab.nodesMin[src + 0];
  nodeData[dst + 1] = ab.nodesMin[src + 1];
  nodeData[dst + 2] = ab.nodesMin[src + 2];
  nodeData[dst + 3] = ab.nodesLeft[i];
  nodeData[dst + 4] = ab.nodesMax[src + 0];
  nodeData[dst + 5] = ab.nodesMax[src + 1];
  nodeData[dst + 6] = ab.nodesMax[src + 2];
  nodeData[dst + 7] = ab.nodesInstanceCount[i];
}
device.queue.writeBuffer(nodeBuffer, 0, nodeData, 0, 8 * ab.nodesUsed);
// BVH primitive order -> original triangle index lookup table (as f32).
bvhPrimitiveTriangleIndices.set(ab.triIdx);
device.queue.writeBuffer(triangleIndexBuffer, 0, bvhPrimitiveTriangleIndices, 0, ab.triIdx.length);
// bluenoise texture 2. 3.24: Bluenoise
// bluenoise texture from https://momentsingraphics.de/BlueNoise.html
/**
 * Fetch an image URL and decode it into an ImageBitmap with color-space
 * conversion disabled (the noise values must arrive bit-exact).
 * @throws Error when the HTTP request does not succeed.
 */
async function loadImageBitmap(url: string): Promise<ImageBitmap> {
  const res = await fetch(url);
  // The original skipped this check and would try to decode an error page.
  if (!res.ok) {
    throw new Error(`Failed to fetch ${url}: ${res.status} ${res.statusText}`);
  }
  const blob = await res.blob();
  return await createImageBitmap(blob, { colorSpaceConversion: 'none' });
}
const bnnoiseSource = await loadImageBitmap("LDR_RGBA_0.png");
const blueNoiseTexture = device.createTexture({
  label: 'bluenoise-texture',
  format: 'rgba8unorm',
  size: [bnnoiseSource.width, bnnoiseSource.height],
  usage: GPUTextureUsage.COPY_DST | GPUTextureUsage.STORAGE_BINDING | GPUTextureUsage.TEXTURE_BINDING,
});
device.queue.copyExternalImageToTexture(
  { source: bnnoiseSource },
  { texture: blueNoiseTexture },
  { width: bnnoiseSource.width, height: bnnoiseSource.height },
);
// construct the texture atlas
// Fallback resources so the shading bind group is always valid, even for
// scenes with no textures at all.
const emptySampler = device.createSampler({
  addressModeU: "clamp-to-edge",
  addressModeV: "clamp-to-edge",
  addressModeW: "clamp-to-edge",
  magFilter: "nearest",
  minFilter: "nearest",
  mipmapFilter: "nearest",
});
const emptyTexture = device.createTexture({
  size: [1, 1, 1],
  format: "rgba8unorm",
  usage: GPUTextureUsage.TEXTURE_BINDING | GPUTextureUsage.COPY_DST,
});
const emptyView = emptyTexture.createView({ dimension: "2d-array" });
let maxWidth = 1;
let maxHeight = 1;
let textureSampler = emptySampler;
let textureArray = emptyTexture;
let textureViewArray = emptyView;
if (hasTextures) {
  maxWidth = t.largestTextureDimensions.width;
  maxHeight = t.largestTextureDimensions.height;
  // lazy: reuse the first texture's sampler for every layer; TODO store samplers per-texture
  textureSampler = t.textures[0].sampler;
  // One array layer per source texture, each padded to the largest size.
  textureArray = device.createTexture({
    size: [maxWidth, maxHeight, t.textures.length],
    format: "rgba8unorm",
    usage:
      GPUTextureUsage.TEXTURE_BINDING |
      GPUTextureUsage.COPY_DST |
      GPUTextureUsage.RENDER_ATTACHMENT,
    dimension: "2d",
  });
  textureViewArray = textureArray.createView({ dimension: "2d-array" });
}
// rather wasteful (and sometimes incorrect, but whatever. Fine for now)
// 1. get each texture's dimension
// 2. pad it to the largest one
// 3. store the original h w in textureSizes[]
// 4. stack the padded texture
if (t.textures.length) {
for (let i = 0; i < t.textures.length; i++) {
const source = t.textures[i].source;
// @ts-ignore / Poorly defined type for the original GLTFImagePostprocessed
const bitmap = source.image as ImageBitmap;
textureSizes[i * 4] = bitmap.width;
textureSizes[i * 4 + 1] = bitmap.height;
textureSizes[i * 4 + 2] = 0.0;
textureSizes[i * 4 + 3] = 0.0;
device.queue.copyExternalImageToTexture(
{ source: bitmap },
{ texture: textureArray, origin: [0, 0, i] },
[bitmap.width, bitmap.height, 1]
);
}
device.queue.writeBuffer(textureSizeBuffer, 0, textureSizes);
}
// bind groups and layouts
// Group 0 ("geometry"): everything the kernel needs to trace rays.
// Binding numbers must match main.wgsl; 3 and 4 are absent here —
// presumably removed at some point; confirm against the shader.
const geometryBindgroupLayout = device.createBindGroupLayout({
label: 'geometry-bind-group-layout',
entries: [
{
// binding 0: HDR output image the kernel writes each frame
binding: 0,
visibility: GPUShaderStage.COMPUTE,
storageTexture: {
access: "write-only",
format: "rgba32float",
viewDimension: "2d",
},
},
{
// binding 1: triangle records (layout in TriangleInfo)
binding: 1,
visibility: GPUShaderStage.COMPUTE,
buffer: {
type: "read-only-storage",
hasDynamicOffset: false,
},
},
{
// binding 2: camera + render-settings uniform block
binding: 2,
visibility: GPUShaderStage.COMPUTE,
buffer: {
type: "uniform",
},
},
{
// binding 5: flattened BVH nodes
binding: 5,
visibility: GPUShaderStage.COMPUTE,
buffer: {
type: "read-only-storage",
hasDynamicOffset: false,
},
},
{
// binding 6: BVH primitive -> triangle index lookup table
binding: 6,
visibility: GPUShaderStage.COMPUTE,
buffer: {
type: "read-only-storage",
hasDynamicOffset: false,
},
},
{
// binding 7: per-pixel accumulation buffer (read/write across frames)
binding: 7,
visibility: GPUShaderStage.COMPUTE,
buffer: {
type: "storage",
},
},
],
});
// Concrete resources for group 0, matching geometryBindgroupLayout.
const geometryBindgroup = device.createBindGroup({
label: 'geometry-bind-group',
layout: geometryBindgroupLayout,
entries: [
{
// HDR viewport storage view
binding: 0,
resource: viewportTextureColorBuffer,
},
{
// triangle records
binding: 1,
resource: {
buffer: triangleBuffer,
},
},
{
// camera/settings uniforms
binding: 2,
resource: {
buffer: uniformBuffer0,
},
},
{
// BVH nodes
binding: 5,
resource: { buffer: nodeBuffer },
},
{
// BVH primitive -> triangle index table
binding: 6,
resource: { buffer: triangleIndexBuffer },
},
{
// per-pixel radiance accumulation
binding: 7,
resource: { buffer: accumulationBuffer },
},
],
});
// Group 1 ("shading"): materials, texture atlas, and sampling resources.
// Binding numbers must match main.wgsl; 3 and 5 are absent here.
const shadingBindGroupLayout = device.createBindGroupLayout({
label: 'shading-bind-group-layout',
entries: [
{
// binding 0: packed material records (layout in MaterialInfo)
binding: 0,
visibility: GPUShaderStage.COMPUTE,
buffer: {
type: "read-only-storage",
hasDynamicOffset: false,
},
},
{
// binding 1: texture atlas, one 2d-array layer per source texture
binding: 1,
visibility: GPUShaderStage.COMPUTE,
texture: {
viewDimension: "2d-array",
},
},
{
// binding 2: shared sampler for all atlas layers
binding: 2,
visibility: GPUShaderStage.COMPUTE,
sampler: {},
},
{
// binding 4: per-texture size table (uniform; see textureSizes)
binding: 4,
visibility: GPUShaderStage.COMPUTE,
buffer: {
type: "uniform",
hasDynamicOffset: false,
},
},
{
// binding 6: blue-noise texture read by the kernel for sampling
binding: 6,
visibility: GPUShaderStage.COMPUTE,
storageTexture: {
access: "read-only",
format: "rgba8unorm",
viewDimension: "2d",
},
},
{
// binding 7: emissive triangle index list for light sampling
binding: 7,
visibility: GPUShaderStage.COMPUTE,
buffer: {
type: "read-only-storage",
hasDynamicOffset: false,
},
},
],
});
// Concrete resources for group 1, matching shadingBindGroupLayout.
// textureViewArray/textureSampler fall back to 1x1 empties when the scene
// has no textures.
const shadingBindGroup = device.createBindGroup({
label: 'shading-bind-group',
layout: shadingBindGroupLayout,
entries: [
{
// material records
binding: 0,
resource: {
buffer: materialBuffer,
},
},
{ binding: 1, resource: textureViewArray },
{ binding: 2, resource: textureSampler },
{
// per-texture sizes
binding: 4,
resource: {
buffer: textureSizeBuffer,
},
},
{
// blue-noise lookup
binding: 6,
resource: blueNoiseTexture.createView(),
},
{
// emissive triangle indices
binding: 7,
resource: {
buffer: emissiveTrianglesBuffer,
},
},
],
});
// Blit pass bindings: the fragment shader reads the HDR texture directly.
// rgba32float is only filterable with the 'float32-filterable' feature,
// hence sampleType 'unfilterable-float'.
const viewportBindgroupLayout = device.createBindGroupLayout({
entries: [
{
binding: 0,
visibility: GPUShaderStage.FRAGMENT,
texture: {
sampleType: 'unfilterable-float',
viewDimension: '2d',
multisampled: false,
},
},
],
});
const viewportBindgroup = device.createBindGroup({
layout: viewportBindgroupLayout,
entries: [
{
binding: 0,
resource: viewportTextureColorBuffer,
},
],
});
// pipelines
const kernelPipelineLayout = device.createPipelineLayout({
  bindGroupLayouts: [geometryBindgroupLayout, shadingBindGroupLayout],
});
// Path-tracing compute kernel; writes HDR samples into the viewport texture.
const kernelPipeline = device.createComputePipeline({
  layout: kernelPipelineLayout,
  compute: {
    module: device.createShaderModule({ code: kernel }),
    entryPoint: "main",
  },
});
const viewportPipelineLayout = device.createPipelineLayout({
  bindGroupLayouts: [viewportBindgroupLayout],
});
// Fullscreen blit/tonemap pipeline. Compile the viewport shader once and
// share the module between the vertex and fragment stages — the original
// compiled the identical source twice.
const viewportModule = device.createShaderModule({ code: viewport });
const viewportPipeline = device.createRenderPipeline({
  layout: viewportPipelineLayout,
  vertex: {
    module: viewportModule,
    entryPoint: "vert_main",
  },
  fragment: {
    module: viewportModule,
    entryPoint: "frag_main",
    targets: [
      {
        format: format,
      },
    ],
  },
  primitive: {
    topology: "triangle-list",
  },
});
// `var` -> `let`: block-scoped declaration, same module-level behavior.
let frametime = 0; // last frame's CPU-side time in ms, updated in renderFrame
window.framecount = 0; // accumulation counter; reset whenever the camera moves
// UI-facing parameters. InitPane() below wires these to the tweak pane and
// the uniform buffer; the initial GPU-side values are written right after.
const UNIFORMS = {
  sample_count: 1.0,
  bounce_count: 3.0,
  aperture: 0.1,
  focal_length: 4.0,
  frameTimeMs: 0,
  // NOTE(review): always 0 at init, and ms/1000 is seconds rather than fps —
  // presumably the pane recomputes this; verify in InitPane.
  fps: frametime / 1000,
  sun_angle: { x: 0.3, y: -0.7, z: 0.3 },
  sun_color: { r: 1.0, g: 0.96, b: 0.85 },
  scale: 22000.0, // sun_color rgb -> lux scale
  albedo_factor: z.materials[0].baseColorFactor,
  metallicFactor: z.materials[0].metallicFactor,
  roughnessFactor: z.materials[0].roughnessFactor,
  thin_lens: false,
};
// initialize values based on UNIFORMS; later updates happen via createPane().
// Byte offsets into uniformBuffer0 (must match the WGSL uniform struct):
//   0   camera position (vec3), 12 frame counter (f32)
//   16  view matrix, 80 inverse view, 144 projection
//   208 sun direction, 220 sun angular radius (rad), 224 sun radiance (color * lux scale)
//   236 [sample_count, bounce_count, aperture, focal_length]
//   252 [last emissive-triangle index (-1 when none — presumably guarded in
//        the shader; verify), thin-lens flag]
device.queue.writeBuffer(uniformBuffer0, 208, Vec3.fromValues(UNIFORMS.sun_angle.x, UNIFORMS.sun_angle.y, UNIFORMS.sun_angle.z));
device.queue.writeBuffer(uniformBuffer0, 220, new Float32Array([0.53 * (Math.PI / 180.0)])); // ~0.5332 degrees / 32.15 arcminutes
device.queue.writeBuffer(uniformBuffer0, 224, Vec3.fromValues(UNIFORMS.sun_color.r * UNIFORMS.scale, UNIFORMS.sun_color.g * UNIFORMS.scale, UNIFORMS.sun_color.b * UNIFORMS.scale));
device.queue.writeBuffer(uniformBuffer0, 236, new Float32Array([UNIFORMS.sample_count, UNIFORMS.bounce_count, UNIFORMS.aperture, UNIFORMS.focal_length]));
device.queue.writeBuffer(uniformBuffer0, 252, new Float32Array([emissiveTriangleIndices.length - 1, UNIFORMS.thin_lens ? 1 : 0]));
let camera = createCamera(canvas);
InitPane(device, UNIFORMS, uniformBuffer0)
setupCameraInput(canvas)
// Upload the initial camera matrices (same offsets used in renderFrame).
device.queue.writeBuffer(uniformBuffer0, 0, camera.position);
device.queue.writeBuffer(uniformBuffer0, 16, camera.view);
device.queue.writeBuffer(uniformBuffer0, 80, camera.inverseView);
device.queue.writeBuffer(uniformBuffer0, 144, camera.projection);
let cpuStart = 0;
let cpuEnd = 0;
let frametimeMs;
const framedata = new Float32Array(1);
const workgroupSize = 16;
const dispatchX = Math.ceil(width / workgroupSize);
const dispatchY = Math.ceil(height / workgroupSize);
/**
 * Per-frame loop: upload camera/frame uniforms, run the path-tracing
 * compute pass, then blit the (tonemapped) HDR image to the swapchain.
 */
async function renderFrame() {
  cpuStart = performance.now();
  window.framecount++;
  updateMovementInput();
  updateCamera(camera);
  if (camera.dirty) {
    window.framecount = 0; // reset accumulation
    device.queue.writeBuffer(uniformBuffer0, 0, camera.position);
    device.queue.writeBuffer(uniformBuffer0, 16, camera.view);
    device.queue.writeBuffer(uniformBuffer0, 80, camera.inverseView);
    device.queue.writeBuffer(uniformBuffer0, 144, camera.projection);
  }
  // Snapshot the frame counter AFTER the dirty check, so the shader sees 0
  // on the reset frame. The original captured it before the reset and
  // uploaded a stale count for one frame after every camera move.
  framedata[0] = window.framecount;
  device.queue.writeBuffer(uniformBuffer0, 12, framedata);
  const commandEncoder = device.createCommandEncoder();
  // compute pass: trace rays, accumulate into the viewport texture
  const computePass = commandEncoder.beginComputePass();
  computePass.setPipeline(kernelPipeline);
  computePass.setBindGroup(0, geometryBindgroup);
  computePass.setBindGroup(1, shadingBindGroup);
  computePass.dispatchWorkgroups(dispatchX, dispatchY);
  computePass.end();
  // blit pass: fullscreen quad (6 vertices), tonemap in frag_main
  const renderPass = commandEncoder.beginRenderPass({
    label: "main",
    colorAttachments: [
      {
        view: context.getCurrentTexture().createView(),
        clearValue: { r: 0.0, g: 0.0, b: 0.0, a: 0.0 }, // rgba
        loadOp: "clear",
        storeOp: "store",
      },
    ],
  });
  renderPass.setPipeline(viewportPipeline);
  renderPass.setBindGroup(0, viewportBindgroup);
  renderPass.draw(6, 1, 0, 0);
  renderPass.end();
  device.queue.submit([commandEncoder.finish()]);
  // Fire-and-forget: record CPU-side frame time once the queue drains.
  void device.queue.onSubmittedWorkDone().then(() => {
    cpuEnd = performance.now();
    frametimeMs = cpuEnd - cpuStart;
    frametime = parseInt(frametimeMs.toFixed(2));
    UNIFORMS.frameTimeMs = frametime;
  });
  requestAnimationFrame(renderFrame);
}
requestAnimationFrame(renderFrame);