import { Vec3 } from "gl-matrix";
import BVH from "./bvh.ts";
import { createCamera, setupCameraInput, updateCamera, updateMovementInput } from "./camera.ts";
import { GLTF2, combineGLTFs } from "./gltf.ts";
import { InitPane } from "./pane.ts";
import kernel from "./shaders/main.wgsl";
import viewport from "./shaders/viewport.wgsl";

declare global {
  interface Window {
    framecount: number;
  }
}

// init device
const canvas = document.querySelector("canvas") as HTMLCanvasElement;
if (!navigator.gpu) {
  throw new Error("WebGPU not supported on this browser.");
}
const adapter = await navigator.gpu.requestAdapter();
if (!adapter) {
  throw new Error("No appropriate GPUAdapter found.");
}
const device = await adapter.requestDevice({
  requiredFeatures: ["timestamp-query"],
});

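// NB: "timestamp-query" is an optional feature; requestDevice() rejects if the adapter lacks a
// required feature. A guarded request (assuming we can run without GPU timings) would be:
//   requiredFeatures: adapter.features.has("timestamp-query") ? ["timestamp-query"] : []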
const width = canvas.clientWidth;
const height = canvas.clientHeight;
canvas.width = Math.max(1, Math.min(width, device.limits.maxTextureDimension2D));
canvas.height = Math.max(1, Math.min(height, device.limits.maxTextureDimension2D));
const context = canvas.getContext("webgpu") as GPUCanvasContext;
const format = navigator.gpu.getPreferredCanvasFormat();
context.configure({
  device,
  format,
});

// compose scene -> triangles -> BVH -> textures
const x = new GLTF2(device, "Duck.gltf", [0.1, 0.1, 0.1], [-13, -1, -0.34], [0, 0, -1.25, 1]);
const y = new GLTF2(device, "cornell_empty_rg.gltf", [20, 20, 20], [0, 0, 0.01], [0, 0, 0, 0]);
const z = new GLTF2(device, "EnvironmentTest.gltf", [1.8, 1.8, 1.8], [0, 15, 25], [0, 0, 0, 0]);

await x.initialize();
await y.initialize();
await z.initialize();
const t = combineGLTFs([x, y, z]);
const ab = new BVH(t.triangles);
ab.construct();
const hasTextures = t.textures && t.textures.length > 0;
const textureCount = hasTextures ? t.textures.length : 0;
const textureSizes = new Float32Array(textureCount * 4); // [width, height, unused, unused] per texture (vec4-aligned)
console.log(t.triangles);
// viewport texture, rgba32float; we store full-fat HDR here and tonemap it in the viewport pass
const viewportTexture = device.createTexture({
  size: {
    width: canvas.width,
    height: canvas.height,
  },
  format: "rgba32float",
  usage: GPUTextureUsage.COPY_DST | GPUTextureUsage.STORAGE_BINDING | GPUTextureUsage.TEXTURE_BINDING,
});
const viewportTextureColorBuffer = viewportTexture.createView();

// offsets for buffer data
const MaterialSize = 64;
const materialData = new Float32Array(t.materials.length * (MaterialSize / 4));
const MaterialInfo = {
  albedo: { type: Float32Array, byteOffset: 0, length: 4 },
  metallic: { type: Float32Array, byteOffset: 16, length: 1 },
  alphaMode: { type: Float32Array, byteOffset: 20, length: 1 },
  alphaCutoff: { type: Float32Array, byteOffset: 24, length: 1 },
  doubleSided: { type: Float32Array, byteOffset: 28, length: 1 },
  emission: { type: Float32Array, byteOffset: 32, length: 3 },
  roughness: { type: Float32Array, byteOffset: 44, length: 1 },
  baseColorTexture: { type: Float32Array, byteOffset: 48, length: 1 },
  normalTexture: { type: Float32Array, byteOffset: 52, length: 1 },
  metallicRoughnessTexture: { type: Float32Array, byteOffset: 56, length: 1 },
  emissiveTexture: { type: Float32Array, byteOffset: 60, length: 1 },
};

// NB: Very fat. Trimming these down to just the vertices should be (4 * 3) * 3 + 12 = 48 bytes.
// At that point, if it's just 48 bytes, it might be cheaper to drop the triangle indices entirely and
// skip the BVH -> tri_index lookup, then resolve shading data via triangle indices. That would still
// need the material index, though, to do alpha tests in trace().
// TODO: Trim these to only the verts, the material index, and a shading index. Move the rest to shadingData[].
// Alternatively, leave this as-is, skip the verts here and move them directly into the BVH, making sure
// the indices line up before passing it in.
const TriangleSize = 176;
const TriangleData = new Float32Array(t.triangles.length * (TriangleSize / 4));
const TriangleInfo = {
  corner_a: { type: Float32Array, byteOffset: 0, length: 3 },
  corner_b: { type: Float32Array, byteOffset: 16, length: 3 },
  corner_c: { type: Float32Array, byteOffset: 32, length: 3 },
  normal_a: { type: Float32Array, byteOffset: 48, length: 3 },
  normal_b: { type: Float32Array, byteOffset: 64, length: 3 },
  normal_c: { type: Float32Array, byteOffset: 80, length: 3 },
  material: { type: Float32Array, byteOffset: 92, length: 1 },
  uVA: { type: Float32Array, byteOffset: 96, length: 2 },
  uVB: { type: Float32Array, byteOffset: 104, length: 2 },
  uVC: { type: Float32Array, byteOffset: 112, length: 2 },
  tangentA: { type: Float32Array, byteOffset: 128, length: 4 },
  tangentB: { type: Float32Array, byteOffset: 144, length: 4 },
  tangentC: { type: Float32Array, byteOffset: 160, length: 4 },
};

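// The byte offsets above follow WGSL struct alignment: vec3<f32> members align to 16 bytes, so each
// corner/normal occupies a 16-byte slot with 4 bytes of padding. The material index rides in the
// padding float after normal_c (offset 92), and the vec2 UVs pack tightly at 8-byte alignment.
// These tables must stay in sync with the corresponding structs in shaders/main.wgsl.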
// init scene buffers
const triangleBuffer = device.createBuffer({
  label: "Triangle Storage",
  size: t.triangles.length * TriangleSize,
  usage: GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_DST,
});
const materialBuffer = device.createBuffer({
  label: "Material storage",
  size: materialData.byteLength,
  usage: GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_DST,
});
const emissiveTrianglesBuffer = device.createBuffer({
  label: "Emissive triangles",
  size: 4 * t.triangles.length, // one f32 index per triangle, worst case
  usage: GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_DST,
});
const nodeBuffer = device.createBuffer({
  label: "BVH nodes",
  size: 32 * ab.nodesUsed,
  usage: GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_DST,
});
const triangleIndexBuffer = device.createBuffer({
  label: "BVH triangle indices",
  size: 4 * t.triangles.length,
  usage: GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_DST,
});
const accumulationBuffer = device.createBuffer({
  label: "Accumulation buffer",
  size: canvas.width * canvas.height * 16, // one vec4<f32> per pixel
  usage: GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_DST,
});
const uniformBuffer0 = device.createBuffer({
  label: "Camera Transform Buffer",
  size: 512,
  usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST,
});
const textureSizeBuffer = device.createBuffer({
  label: "Texture sizes",
  size: Math.max(textureSizes.byteLength, 2048),
  usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST,
});

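// accumulationBuffer keeps a running HDR sum per pixel; window.framecount is reset to 0 whenever the
// camera moves (see renderFrame below), which presumably lets the kernel restart its running average
// rather than blending in stale samples.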
// populate buffers
const emissiveMaterialIndices: number[] = [];
const emissiveTriangleIndices: number[] = [];
const bvhPrimitiveTriangleIndices: Float32Array = new Float32Array(ab.triIdx.length);

type MaterialPropertyName = keyof typeof MaterialInfo;
t.materials.forEach((mat, i) => {
  const materialOffset = i * (MaterialSize / 4); // base float offset for the current material
  const setData = (propertyName: MaterialPropertyName, value: number[]) => {
    const info = MaterialInfo[propertyName];
    materialData.set(value, materialOffset + info.byteOffset / 4);
  };
  const setFloat = (propertyName: MaterialPropertyName, value: number) => {
    const info = MaterialInfo[propertyName];
    materialData[materialOffset + info.byteOffset / 4] = value;
  };
  setData("albedo", mat.baseColorFactor); // vec4: rgba base color factor
  setFloat("metallic", mat.metallicFactor);
  setFloat("alphaMode", mat.alphaMode);
  setFloat("alphaCutoff", mat.alphaCutoff);
  setFloat("doubleSided", mat.doubleSided);
  setData("emission", mat.emissiveFactor);
  setFloat("roughness", mat.roughnessFactor);
  setFloat("baseColorTexture", mat.baseColorTexture);
  setFloat("normalTexture", mat.normalTexture);
  setFloat("metallicRoughnessTexture", mat.metallicRoughnessTexture);
  setFloat("emissiveTexture", mat.emissiveTexture);
  if (mat.emissiveFactor[0] !== 0 || mat.emissiveFactor[1] !== 0 || mat.emissiveFactor[2] !== 0) {
    emissiveMaterialIndices.push(i);
  }
});
device.queue.writeBuffer(materialBuffer, 0, materialData);

type TrianglePropertyName = keyof typeof TriangleInfo;
t.triangles.forEach((tri, i) => {
  const triOffset = i * (TriangleSize / 4);
  const setData = (propertyName: TrianglePropertyName, value: number[]) => {
    const info = TriangleInfo[propertyName];
    TriangleData.set(value, triOffset + info.byteOffset / 4);
  };
  const setFloat = (propertyName: TrianglePropertyName, value: number) => {
    const info = TriangleInfo[propertyName];
    TriangleData[triOffset + info.byteOffset / 4] = value;
  };
  setData("corner_a", tri.cornerA);
  setData("corner_b", tri.cornerB);
  setData("corner_c", tri.cornerC);
  setData("normal_a", tri.normalA);
  setData("normal_b", tri.normalB);
  setData("normal_c", tri.normalC);
  setFloat("material", tri.mat);
  setData("uVA", tri.uvA);
  setData("uVB", tri.uvB);
  setData("uVC", tri.uvC);
  setData("tangentA", tri.tangentA);
  setData("tangentB", tri.tangentB);
  setData("tangentC", tri.tangentC);
  if (emissiveMaterialIndices.includes(tri.mat)) {
    emissiveTriangleIndices.push(i); // push the triangle's index
  }
});
device.queue.writeBuffer(triangleBuffer, 0, TriangleData);
device.queue.writeBuffer(emissiveTrianglesBuffer, 0, new Float32Array(emissiveTriangleIndices));

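// The emissive triangle indices gathered above are what the shader presumably samples as light
// sources (next-event estimation). Note the emissiveMaterialIndices.includes() check above is
// O(materials) per triangle; for large scenes a Set lookup would be constant time, e.g.:
//   const emissiveSet = new Set(emissiveMaterialIndices);
//   if (emissiveSet.has(tri.mat)) { ... }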
const nodeData: Float32Array = new Float32Array(8 * ab.nodesUsed);
for (let i = 0; i < ab.nodesUsed; i++) {
  const boundsOffset = i * 3;
  nodeData[8 * i] = ab.nodesMin[boundsOffset + 0];
  nodeData[8 * i + 1] = ab.nodesMin[boundsOffset + 1];
  nodeData[8 * i + 2] = ab.nodesMin[boundsOffset + 2];
  nodeData[8 * i + 3] = ab.nodesLeft[i];
  nodeData[8 * i + 4] = ab.nodesMax[boundsOffset + 0];
  nodeData[8 * i + 5] = ab.nodesMax[boundsOffset + 1];
  nodeData[8 * i + 6] = ab.nodesMax[boundsOffset + 2];
  nodeData[8 * i + 7] = ab.nodesInstanceCount[i];
}
device.queue.writeBuffer(nodeBuffer, 0, nodeData, 0, 8 * ab.nodesUsed);

for (let i = 0; i < ab.triIdx.length; i++) {
  bvhPrimitiveTriangleIndices[i] = ab.triIdx[i];
}
device.queue.writeBuffer(triangleIndexBuffer, 0, bvhPrimitiveTriangleIndices, 0, ab.triIdx.length);

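// Each BVH node is packed as two vec4s: (min.xyz, leftChild) and (max.xyz, primitiveCount), i.e.
// 32 bytes, matching the nodeBuffer size above. The triangle indices are uploaded as f32, which is
// only exact up to 2^24 primitives; beyond that a u32 buffer (and matching WGSL type) would be needed.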
// 2.3.24: blue-noise texture, from https://momentsingraphics.de/BlueNoise.html
async function loadImageBitmap(url: string) {
  const res = await fetch(url);
  const blob = await res.blob();
  return await createImageBitmap(blob, { colorSpaceConversion: "none" });
}
const blueNoiseSource = await loadImageBitmap("LDR_RGBA_0.png");
const blueNoiseTexture = device.createTexture({
  label: "bluenoise-texture",
  format: "rgba8unorm",
  size: [blueNoiseSource.width, blueNoiseSource.height],
  usage: GPUTextureUsage.COPY_DST | GPUTextureUsage.STORAGE_BINDING | GPUTextureUsage.TEXTURE_BINDING,
});
device.queue.copyExternalImageToTexture(
  { source: blueNoiseSource },
  { texture: blueNoiseTexture },
  { width: blueNoiseSource.width, height: blueNoiseSource.height },
);

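// Blue noise is used instead of white noise because its error is concentrated in high frequencies,
// which reads as a much calmer dither at low sample counts; the kernel presumably tiles this texture
// over the screen and offsets it per frame.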
// construct the texture atlas
const emptySampler = device.createSampler({
  addressModeU: "clamp-to-edge",
  addressModeV: "clamp-to-edge",
  addressModeW: "clamp-to-edge",
  magFilter: "nearest",
  minFilter: "nearest",
  mipmapFilter: "nearest",
});
const emptyTexture = device.createTexture({
  size: [1, 1, 1],
  format: "rgba8unorm",
  usage: GPUTextureUsage.TEXTURE_BINDING | GPUTextureUsage.COPY_DST,
});
const emptyView = emptyTexture.createView({
  dimension: "2d-array",
});
let maxWidth = 1, maxHeight = 1;
let textureSampler = emptySampler;
let textureArray = emptyTexture;
let textureViewArray = emptyView;
if (hasTextures) {
  maxWidth = t.largestTextureDimensions.width;
  maxHeight = t.largestTextureDimensions.height;
  textureSampler = t.textures[0].sampler; // lazy, store the samplers correctly
  textureArray = device.createTexture({
    size: [maxWidth, maxHeight, t.textures.length],
    format: "rgba8unorm",
    usage:
      GPUTextureUsage.TEXTURE_BINDING |
      GPUTextureUsage.COPY_DST |
      GPUTextureUsage.RENDER_ATTACHMENT,
    dimension: "2d",
  });
  textureViewArray = textureArray.createView({ dimension: "2d-array" });
}

// Rather wasteful (and sometimes incorrect), but fine for now:
// 1. get each texture's dimensions
// 2. pad it out to the largest texture
// 3. store the original width/height in textureSizes[]
// 4. stack the padded texture into the array
if (hasTextures) {
  for (let i = 0; i < t.textures.length; i++) {
    const source = t.textures[i].source;
    // @ts-ignore / Poorly defined type for the original GLTFImagePostprocessed
    const bitmap = source.image as ImageBitmap;
    textureSizes[i * 4] = bitmap.width;
    textureSizes[i * 4 + 1] = bitmap.height;
    textureSizes[i * 4 + 2] = 0.0; // unused padding
    textureSizes[i * 4 + 3] = 0.0;

    device.queue.copyExternalImageToTexture(
      { source: bitmap },
      { texture: textureArray, origin: [0, 0, i] },
      [bitmap.width, bitmap.height, 1]
    );
  }
  device.queue.writeBuffer(textureSizeBuffer, 0, textureSizes);
}

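// Because each layer is allocated at the largest texture's dimensions but only the top-left
// bitmap.width x bitmap.height texels are written, the shader is assumed to rescale UVs per layer,
// roughly uv_layer = uv * vec2(width_i, height_i) / vec2(maxWidth, maxHeight), using textureSizes.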
// bind groups and layouts
const geometryBindgroupLayout = device.createBindGroupLayout({
  label: "geometry-bind-group-layout",
  entries: [
    {
      binding: 0,
      visibility: GPUShaderStage.COMPUTE,
      storageTexture: {
        access: "write-only",
        format: "rgba32float",
        viewDimension: "2d",
      },
    },
    {
      binding: 1,
      visibility: GPUShaderStage.COMPUTE,
      buffer: {
        type: "read-only-storage",
        hasDynamicOffset: false,
      },
    },
    {
      binding: 2,
      visibility: GPUShaderStage.COMPUTE,
      buffer: {
        type: "uniform",
      },
    },
    {
      binding: 5,
      visibility: GPUShaderStage.COMPUTE,
      buffer: {
        type: "read-only-storage",
        hasDynamicOffset: false,
      },
    },
    {
      binding: 6,
      visibility: GPUShaderStage.COMPUTE,
      buffer: {
        type: "read-only-storage",
        hasDynamicOffset: false,
      },
    },
    {
      binding: 7,
      visibility: GPUShaderStage.COMPUTE,
      buffer: {
        type: "storage",
      },
    },
  ],
});

const geometryBindgroup = device.createBindGroup({
  label: "geometry-bind-group",
  layout: geometryBindgroupLayout,
  entries: [
    {
      binding: 0,
      resource: viewportTextureColorBuffer,
    },
    {
      binding: 1,
      resource: {
        buffer: triangleBuffer,
      },
    },
    {
      binding: 2,
      resource: {
        buffer: uniformBuffer0,
      },
    },
    {
      binding: 5,
      resource: { buffer: nodeBuffer },
    },
    {
      binding: 6,
      resource: { buffer: triangleIndexBuffer },
    },
    {
      binding: 7,
      resource: { buffer: accumulationBuffer },
    },
  ],
});

const shadingBindGroupLayout = device.createBindGroupLayout({
  label: "shading-bind-group-layout",
  entries: [
    {
      binding: 0,
      visibility: GPUShaderStage.COMPUTE,
      buffer: {
        type: "read-only-storage",
        hasDynamicOffset: false,
      },
    },
    {
      binding: 1,
      visibility: GPUShaderStage.COMPUTE,
      texture: {
        viewDimension: "2d-array",
      },
    },
    {
      binding: 2,
      visibility: GPUShaderStage.COMPUTE,
      sampler: {},
    },
    {
      binding: 4,
      visibility: GPUShaderStage.COMPUTE,
      buffer: {
        type: "uniform",
        hasDynamicOffset: false,
      },
    },
    {
      binding: 6,
      visibility: GPUShaderStage.COMPUTE,
      storageTexture: {
        access: "read-only",
        format: "rgba8unorm",
        viewDimension: "2d",
      },
    },
    {
      binding: 7,
      visibility: GPUShaderStage.COMPUTE,
      buffer: {
        type: "read-only-storage",
        hasDynamicOffset: false,
      },
    },
  ],
});

const shadingBindGroup = device.createBindGroup({
  label: "shading-bind-group",
  layout: shadingBindGroupLayout,
  entries: [
    {
      binding: 0,
      resource: {
        buffer: materialBuffer,
      },
    },
    { binding: 1, resource: textureViewArray },
    { binding: 2, resource: textureSampler },
    {
      binding: 4,
      resource: {
        buffer: textureSizeBuffer,
      },
    },
    {
      binding: 6,
      resource: blueNoiseTexture.createView(),
    },
    {
      binding: 7,
      resource: {
        buffer: emissiveTrianglesBuffer,
      },
    },
  ],
});

const viewportBindgroupLayout = device.createBindGroupLayout({
  label: "viewport-bind-group-layout",
  entries: [
    {
      binding: 0,
      visibility: GPUShaderStage.FRAGMENT,
      texture: {
        sampleType: "unfilterable-float",
        viewDimension: "2d",
        multisampled: false,
      },
    },
  ],
});

const viewportBindgroup = device.createBindGroup({
  label: "viewport-bind-group",
  layout: viewportBindgroupLayout,
  entries: [
    {
      binding: 0,
      resource: viewportTextureColorBuffer,
    },
  ],
});

// pipelines
const kernelPipelineLayout = device.createPipelineLayout({
  bindGroupLayouts: [geometryBindgroupLayout, shadingBindGroupLayout],
});

const kernelPipeline = device.createComputePipeline({
  layout: kernelPipelineLayout,
  compute: {
    module: device.createShaderModule({
      code: kernel,
    }),
    entryPoint: "main",
  },
});

const viewportPipelineLayout = device.createPipelineLayout({
  bindGroupLayouts: [viewportBindgroupLayout],
});

const viewportPipeline = device.createRenderPipeline({
  layout: viewportPipelineLayout,
  vertex: {
    module: device.createShaderModule({
      code: viewport,
    }),
    entryPoint: "vert_main",
  },
  fragment: {
    module: device.createShaderModule({
      code: viewport,
    }),
    entryPoint: "frag_main",
    targets: [
      {
        format,
      },
    ],
  },
  primitive: {
    topology: "triangle-list",
  },
});
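// The compute kernel writes rgba32float, which isn't a presentable canvas format, so the second
// pipeline blits it to the swap chain: the fullscreen pass samples the HDR texture as
// unfilterable-float and, per the comment above, is where tonemapping is assumed to happen.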
let frametime = 0;
window.framecount = 0;
const UNIFORMS = {
  sample_count: 1.0,
  bounce_count: 3.0,
  aperture: 0.1,
  focal_length: 4.0,
  frameTimeMs: 0,
  fps: 0,
  sun_angle: { x: 0.3, y: -0.7, z: 0.3 },
  sun_color: { r: 1.0, g: 0.96, b: 0.85 },
  scale: 22000.0, // sun_color rgb -> lux scale
  albedo_factor: z.materials[0].baseColorFactor,
  metallicFactor: z.materials[0].metallicFactor,
  roughnessFactor: z.materials[0].roughnessFactor,
  thin_lens: false,
};
// initialize values from UNIFORMS; subsequent updates are handled by the pane (InitPane)
device.queue.writeBuffer(uniformBuffer0, 208, Vec3.fromValues(UNIFORMS.sun_angle.x, UNIFORMS.sun_angle.y, UNIFORMS.sun_angle.z));
device.queue.writeBuffer(uniformBuffer0, 220, new Float32Array([0.53 * (Math.PI / 180.0)])); // ~0.53 degrees (~32 arcminutes), the sun's angular diameter
device.queue.writeBuffer(uniformBuffer0, 224, Vec3.fromValues(UNIFORMS.sun_color.r * UNIFORMS.scale, UNIFORMS.sun_color.g * UNIFORMS.scale, UNIFORMS.sun_color.b * UNIFORMS.scale));
device.queue.writeBuffer(uniformBuffer0, 236, new Float32Array([UNIFORMS.sample_count, UNIFORMS.bounce_count, UNIFORMS.aperture, UNIFORMS.focal_length]));
device.queue.writeBuffer(uniformBuffer0, 252, new Float32Array([emissiveTriangleIndices.length - 1, UNIFORMS.thin_lens ? 1 : 0]));

const camera = createCamera(canvas);
InitPane(device, UNIFORMS, uniformBuffer0);
setupCameraInput(canvas);

device.queue.writeBuffer(uniformBuffer0, 0, camera.position);
device.queue.writeBuffer(uniformBuffer0, 16, camera.view);
device.queue.writeBuffer(uniformBuffer0, 80, camera.inverseView);
device.queue.writeBuffer(uniformBuffer0, 144, camera.projection);

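// All writeBuffer offsets into uniformBuffer0 are byte offsets (position at 0, view at 16,
// inverseView at 80, projection at 144, then the sun/sampling parameters above) and have to match
// the uniform struct layout in the WGSL kernel exactly; the 512-byte allocation leaves headroom.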
let cpuStart = 0;
let cpuEnd = 0;
let frametimeMs;
const framedata = new Float32Array(1);

const workgroupSize = 16;
// dispatch over the actual (possibly clamped) render target size, not the CSS client size
const dispatchX = Math.ceil(canvas.width / workgroupSize);
const dispatchY = Math.ceil(canvas.height / workgroupSize);

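// With ceil-division the grid can overshoot the texture by up to workgroupSize - 1 threads per axis,
// so the kernel is expected to early-out when the invocation id falls outside canvas.width/height.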
async function renderFrame() {
  cpuStart = performance.now();
  window.framecount++;
  framedata[0] = window.framecount;
  updateMovementInput();
  updateCamera(camera);
  if (camera.dirty) {
    window.framecount = 0; // reset accumulation
    device.queue.writeBuffer(uniformBuffer0, 0, camera.position);
    device.queue.writeBuffer(uniformBuffer0, 16, camera.view);
    device.queue.writeBuffer(uniformBuffer0, 80, camera.inverseView);
    device.queue.writeBuffer(uniformBuffer0, 144, camera.projection);
  }
  device.queue.writeBuffer(uniformBuffer0, 12, framedata);
  const commandEncoder = device.createCommandEncoder();
  // compute pass
  const computePass = commandEncoder.beginComputePass();
  computePass.setPipeline(kernelPipeline);
  computePass.setBindGroup(0, geometryBindgroup);
  computePass.setBindGroup(1, shadingBindGroup);
  computePass.dispatchWorkgroups(dispatchX, dispatchY);
  computePass.end();
  // blit pass
  const renderPass = commandEncoder.beginRenderPass({
    label: "main",
    colorAttachments: [
      {
        view: context.getCurrentTexture().createView(),
        clearValue: { r: 0.0, g: 0.0, b: 0.0, a: 0.0 }, // rgba
        loadOp: "clear",
        storeOp: "store",
      },
    ],
  });
  renderPass.setPipeline(viewportPipeline);
  renderPass.setBindGroup(0, viewportBindgroup);
  renderPass.draw(6, 1, 0, 0); // fullscreen quad as two triangles
  renderPass.end();
  device.queue.submit([commandEncoder.finish()]);
  device.queue.onSubmittedWorkDone().then(() => {
    cpuEnd = performance.now();
    frametimeMs = cpuEnd - cpuStart;
    frametime = Math.round(frametimeMs);
    UNIFORMS.frameTimeMs = frametime;
  });
  requestAnimationFrame(renderFrame);
}
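// Note: onSubmittedWorkDone() measures CPU submit -> GPU completion wall time, not isolated GPU time;
// the "timestamp-query" feature requested at device creation is what proper GPU-side timings would use.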

requestAnimationFrame(renderFrame);