🎯 Recommended Examples
A balanced selection of samples from various categories for you to explore
WebGPU Graphics API
A modern graphics API for high-performance 3D graphics and GPU compute in the browser
💻 Basic WebGPU Setup javascript
🟡 intermediate
⭐⭐⭐
Initialize WebGPU and create a simple render pipeline
⏱️ 20 min
🏷️ webgpu, graphics, rendering, 3d, shaders
Prerequisites:
JavaScript, Graphics concepts, Shader basics
// Basic WebGPU Setup
class WebGPURenderer {
constructor(canvas) {
this.canvas = canvas;
this.device = null;
this.context = null;
this.format = null;
}
async init() {
// Check WebGPU support
if (!navigator.gpu) {
throw new Error('WebGPU not supported');
}
// Get adapter and device
const adapter = await navigator.gpu.requestAdapter();
if (!adapter) {
throw new Error('No appropriate GPU adapter found');
}
this.device = await adapter.requestDevice();
// Configure canvas context
this.context = this.canvas.getContext('webgpu');
this.format = navigator.gpu.getPreferredCanvasFormat();
this.context.configure({
device: this.device,
format: this.format,
alphaMode: 'opaque',
});
console.log('WebGPU initialized successfully');
}
createBasicPipeline() {
// Vertex shader
const vertexShaderCode = `
struct VertexOutput {
@builtin(position) position: vec4<f32>,
@location(0) color: vec3<f32>,
}
@vertex
fn main(@builtin(vertex_index) vertexIndex: u32) -> VertexOutput {
var output: VertexOutput;
// Triangle vertices (declared with var so the array can be indexed by the runtime vertex index)
var pos = array<vec2<f32>, 3>(
vec2<f32>(-0.5, -0.5),
vec2<f32>( 0.5, -0.5),
vec2<f32>( 0.0, 0.5)
);
var colors = array<vec3<f32>, 3>(
vec3<f32>(1.0, 0.0, 0.0), // Red
vec3<f32>(0.0, 1.0, 0.0), // Green
vec3<f32>(0.0, 0.0, 1.0) // Blue
);
output.position = vec4<f32>(pos[vertexIndex], 0.0, 1.0);
output.color = colors[vertexIndex];
return output;
}
`;
// Fragment shader
const fragmentShaderCode = `
@fragment
fn main(@location(0) color: vec3<f32>) -> @location(0) vec4<f32> {
return vec4<f32>(color, 1.0);
}
`;
// Create shaders
const vertexShader = this.device.createShaderModule({
code: vertexShaderCode
});
const fragmentShader = this.device.createShaderModule({
code: fragmentShaderCode
});
// Create pipeline
const pipeline = this.device.createRenderPipeline({
layout: 'auto',
vertex: {
module: vertexShader,
entryPoint: 'main',
},
fragment: {
module: fragmentShader,
entryPoint: 'main',
targets: [{
format: this.format,
}],
},
primitive: {
topology: 'triangle-list',
},
});
return pipeline;
}
render(pipeline) {
// Create command encoder
const commandEncoder = this.device.createCommandEncoder();
// Create render pass
const textureView = this.context.getCurrentTexture().createView();
const renderPass = commandEncoder.beginRenderPass({
colorAttachments: [{
view: textureView,
clearValue: { r: 0.1, g: 0.1, b: 0.1, a: 1.0 },
loadOp: 'clear',
storeOp: 'store',
}],
});
renderPass.setPipeline(pipeline);
renderPass.draw(3); // Draw 3 vertices for a triangle
renderPass.end();
// Submit commands
this.device.queue.submit([commandEncoder.finish()]);
}
async start() {
await this.init();
const pipeline = this.createBasicPipeline();
// Animation loop
const frame = () => {
this.render(pipeline);
requestAnimationFrame(frame);
};
frame();
}
}
// Usage
async function main() {
const canvas = document.getElementById('canvas');
const renderer = new WebGPURenderer(canvas);
try {
await renderer.start();
} catch (error) {
console.error('WebGPU initialization failed:', error);
}
}
// Check for WebGPU support before running
if (navigator.gpu) {
main();
} else {
console.warn('WebGPU is not supported in this browser');
}
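Note: the usage snippets look up a canvas with document.getElementById('canvas'). Below is a minimal sketch of the assumed page setup, written as a script-side helper (setupCanvas is a hypothetical name, not part of the example), which also sizes the drawing buffer for devicePixelRatio since WebGPU does not do this automatically.
// Hypothetical helper: create the <canvas id="canvas"> element the examples expect
function setupCanvas(cssWidth = 640, cssHeight = 480) {
  const canvas = document.createElement('canvas');
  canvas.id = 'canvas';
  // Match the drawing buffer to the CSS size times devicePixelRatio for crisp output
  const dpr = window.devicePixelRatio || 1;
  canvas.style.width = `${cssWidth}px`;
  canvas.style.height = `${cssHeight}px`;
  canvas.width = Math.floor(cssWidth * dpr);
  canvas.height = Math.floor(cssHeight * dpr);
  document.body.appendChild(canvas);
  return canvas;
}
// Call setupCanvas() before main() if the page does not already contain the canvas.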
💻 WebGPU Texture Mapping javascript
🟡 intermediate
⭐⭐⭐⭐
Apply textures to 3D objects with WebGPU
⏱️ 25 min
🏷️ webgpu, texture, mapping, graphics, rendering
Prerequisites:
JavaScript, WebGPU basics, Texture mapping concepts
// WebGPU Texture Mapping
class TextureRenderer extends WebGPURenderer {
async createTexturedPipeline(texture) {
// Vertex shader with UV coordinates
const vertexShaderCode = `
struct VertexInput {
@location(0) position: vec2<f32>,
@location(1) uv: vec2<f32>,
}
struct VertexOutput {
@builtin(position) position: vec4<f32>,
@location(0) uv: vec2<f32>,
}
@vertex
fn main(input: VertexInput) -> VertexOutput {
var output: VertexOutput;
output.position = vec4<f32>(input.position, 0.0, 1.0);
output.uv = input.uv;
return output;
}
`;
// Fragment shader with texture sampling
const fragmentShaderCode = `
@group(0) @binding(0) var textureSampler: sampler;
@group(0) @binding(1) var textureData: texture_2d<f32>;
@fragment
fn main(@location(0) uv: vec2<f32>) -> @location(0) vec4<f32> {
return textureSample(textureData, textureSampler, uv);
}
`;
// Create shaders
const vertexShader = this.device.createShaderModule({
code: vertexShaderCode
});
const fragmentShader = this.device.createShaderModule({
code: fragmentShaderCode
});
// Create vertex buffer (position + UV)
const vertices = new Float32Array([
// Position (x, y) UV (u, v)
-0.8, -0.8, 0.0, 1.0, // Bottom left
0.8, -0.8, 1.0, 1.0, // Bottom right
-0.8, 0.8, 0.0, 0.0, // Top left
0.8, 0.8, 1.0, 0.0, // Top right (strip order BL, BR, TL, TR covers the full quad)
]);
const vertexBuffer = this.device.createBuffer({
size: vertices.byteLength,
usage: GPUBufferUsage.VERTEX | GPUBufferUsage.COPY_DST,
});
this.device.queue.writeBuffer(vertexBuffer, 0, vertices);
// Create texture sampler
const sampler = this.device.createSampler({
magFilter: 'linear',
minFilter: 'linear',
addressModeU: 'repeat',
addressModeV: 'repeat',
});
// Create bind group layout (shared by the bind group and the pipeline layout)
const bindGroupLayout = this.device.createBindGroupLayout({
entries: [
{
binding: 0,
visibility: GPUShaderStage.FRAGMENT,
sampler: { type: 'filtering' },
},
{
binding: 1,
visibility: GPUShaderStage.FRAGMENT,
texture: { sampleType: 'float' },
},
],
});
// Create bind group
const bindGroup = this.device.createBindGroup({
layout: bindGroupLayout,
entries: [
{ binding: 0, resource: sampler },
{ binding: 1, resource: texture.createView() },
],
});
// Create pipeline
const pipeline = this.device.createRenderPipeline({
layout: this.device.createPipelineLayout({
bindGroupLayouts: [bindGroupLayout],
}),
vertex: {
module: vertexShader,
entryPoint: 'main',
buffers: [{
arrayStride: 4 * 4, // 4 floats per vertex (x, y, u, v)
attributes: [
{
format: 'float32x2',
offset: 0,
shaderLocation: 0, // position
},
{
format: 'float32x2',
offset: 2 * 4,
shaderLocation: 1, // uv
},
],
}],
},
fragment: {
module: fragmentShader,
entryPoint: 'main',
targets: [{
format: this.format,
}],
},
primitive: {
topology: 'triangle-strip',
},
});
return { pipeline, vertexBuffer, bindGroup };
}
async loadTexture(imageUrl) {
const response = await fetch(imageUrl);
const imageBitmap = await createImageBitmap(await response.blob());
const texture = this.device.createTexture({
size: [imageBitmap.width, imageBitmap.height, 1],
format: 'rgba8unorm',
usage: GPUTextureUsage.TEXTURE_BINDING | GPUTextureUsage.COPY_DST | GPUTextureUsage.RENDER_ATTACHMENT,
});
this.device.queue.copyExternalImageToTexture(
{ source: imageBitmap },
{ texture: texture },
[imageBitmap.width, imageBitmap.height]
);
return texture;
}
async renderTexturedQuad(textureUrl) {
await this.init();
const texture = await this.loadTexture(textureUrl);
const { pipeline, vertexBuffer, bindGroup } = await this.createTexturedPipeline(texture);
const frame = () => {
const commandEncoder = this.device.createCommandEncoder();
const textureView = this.context.getCurrentTexture().createView();
const renderPass = commandEncoder.beginRenderPass({
colorAttachments: [{
view: textureView,
clearValue: { r: 0.1, g: 0.1, b: 0.1, a: 1.0 },
loadOp: 'clear',
storeOp: 'store',
}],
});
renderPass.setPipeline(pipeline);
renderPass.setVertexBuffer(0, vertexBuffer);
renderPass.setBindGroup(0, bindGroup);
renderPass.draw(4); // Draw 4 vertices for a quad
renderPass.end();
this.device.queue.submit([commandEncoder.finish()]);
requestAnimationFrame(frame);
};
frame();
}
}
// Usage
async function renderTexturedImage() {
const canvas = document.getElementById('canvas');
const renderer = new TextureRenderer(canvas);
await renderer.renderTexturedQuad('texture.jpg');
}
if (navigator.gpu) {
renderTexturedImage().catch((error) => console.error('Texture rendering failed:', error));
} else {
console.warn('WebGPU is not supported in this browser');
}
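The example fetches 'texture.jpg' from the server. If no image is at hand, a small procedural texture works just as well; the sketch below (createCheckerTexture is a hypothetical helper, not part of the example) builds a 64x64 checkerboard with device.queue.writeTexture and can be passed straight to createTexturedPipeline after init().
// Hypothetical helper: build an in-memory checkerboard texture instead of fetching an image
function createCheckerTexture(device, size = 64, cell = 8) {
  const data = new Uint8Array(size * size * 4);
  for (let y = 0; y < size; y++) {
    for (let x = 0; x < size; x++) {
      const on = ((Math.floor(x / cell) + Math.floor(y / cell)) % 2) === 0;
      const i = (y * size + x) * 4;
      data[i] = data[i + 1] = data[i + 2] = on ? 255 : 40; // light/dark grey cells
      data[i + 3] = 255; // opaque alpha
    }
  }
  const texture = device.createTexture({
    size: [size, size, 1],
    format: 'rgba8unorm',
    usage: GPUTextureUsage.TEXTURE_BINDING | GPUTextureUsage.COPY_DST,
  });
  device.queue.writeTexture({ texture }, data, { bytesPerRow: size * 4 }, [size, size]);
  return texture;
}
// e.g. after `await renderer.init()`: renderer.createTexturedPipeline(createCheckerTexture(renderer.device))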
💻 WebGPU Compute Shaders javascript
🔴 complex
⭐⭐⭐⭐⭐
GPU compute with compute shaders for parallel processing
⏱️ 30 min
🏷️ webgpu, compute, shaders, gpu, parallel
Prerequisites:
JavaScript, WebGPU, GPU computing, Parallel programming
// WebGPU Compute Shaders for Parallel Processing
class ComputeRenderer {
async init() {
if (!navigator.gpu) throw new Error('WebGPU not supported');
const adapter = await navigator.gpu.requestAdapter();
if (!adapter) throw new Error('No appropriate GPU adapter found');
this.device = await adapter.requestDevice();
}
async createComputePipeline() {
// Compute shader for parallel matrix multiplication
const computeShaderCode = `
@group(0) @binding(0) var<storage, read> matrixA: array<f32>;
@group(0) @binding(1) var<storage, read> matrixB: array<f32>;
@group(0) @binding(2) var<storage, read_write> result: array<f32>;
struct Params {
size: u32,
}
@group(0) @binding(3) var<uniform> params: Params;
@compute @workgroup_size(16, 16)
fn main(@builtin(global_invocation_id) id: vec3<u32>) {
let row = id.x;
let col = id.y;
if (row >= params.size || col >= params.size) {
return;
}
var sum = 0.0;
for (var i = 0u; i < params.size; i = i + 1u) {
sum = sum + matrixA[row * params.size + i] * matrixB[i * params.size + col];
}
result[row * params.size + col] = sum;
}
`;
const computeShader = this.device.createShaderModule({
code: computeShaderCode
});
const pipeline = this.device.createComputePipeline({
layout: 'auto',
compute: {
module: computeShader,
entryPoint: 'main',
},
});
return pipeline;
}
createMatrices(size) {
const matrixA = new Float32Array(size * size);
const matrixB = new Float32Array(size * size);
const result = new Float32Array(size * size);
// Initialize matrices with random values
for (let i = 0; i < size * size; i++) {
matrixA[i] = Math.random();
matrixB[i] = Math.random();
}
return { matrixA, matrixB, result };
}
async matrixMultiplication(size = 512) {
const { matrixA, matrixB, result } = this.createMatrices(size);
// Create GPU buffers
const bufferA = this.device.createBuffer({
size: matrixA.byteLength,
usage: GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_DST,
});
const bufferB = this.device.createBuffer({
size: matrixB.byteLength,
usage: GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_DST,
});
const bufferResult = this.device.createBuffer({
size: result.byteLength,
usage: GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_SRC,
});
// CPU buffer for reading results
const readBuffer = this.device.createBuffer({
size: result.byteLength,
usage: GPUBufferUsage.COPY_DST | GPUBufferUsage.MAP_READ,
});
// Write data to GPU
this.device.queue.writeBuffer(bufferA, 0, matrixA);
this.device.queue.writeBuffer(bufferB, 0, matrixB);
// Uniform buffer for matrix size
const uniformBuffer = this.device.createBuffer({
size: 4, // 1 uint32
usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST,
});
const sizeArray = new Uint32Array([size]);
this.device.queue.writeBuffer(uniformBuffer, 0, sizeArray);
// Create bind group
const pipeline = await this.createComputePipeline();
const bindGroup = this.device.createBindGroup({
layout: pipeline.getBindGroupLayout(0),
entries: [
{ binding: 0, resource: { buffer: bufferA } },
{ binding: 1, resource: { buffer: bufferB } },
{ binding: 2, resource: { buffer: bufferResult } },
{ binding: 3, resource: { buffer: uniformBuffer } },
],
});
// Execute compute shader
const commandEncoder = this.device.createCommandEncoder();
const computePass = commandEncoder.beginComputePass();
computePass.setPipeline(pipeline);
computePass.setBindGroup(0, bindGroup);
computePass.dispatchWorkgroups(
Math.ceil(size / 16),
Math.ceil(size / 16)
);
computePass.end();
// Copy result to readable buffer
commandEncoder.copyBufferToBuffer(bufferResult, 0, readBuffer, 0, result.byteLength);
// Submit commands
this.device.queue.submit([commandEncoder.finish()]);
// Read results (copy the data out before unmapping, since unmap detaches the mapped range)
await readBuffer.mapAsync(GPUMapMode.READ);
const gpuResult = new Float32Array(readBuffer.getMappedRange()).slice();
readBuffer.unmap();
return gpuResult;
}
// Particle simulation compute shader
async createParticleSimulation() {
const particleShaderCode = `
struct Particle {
position: vec2<f32>,
velocity: vec2<f32>,
}
@group(0) @binding(0) var<storage, read_write> particles: array<Particle>;
struct Params {
deltaTime: f32,
count: u32,
}
@group(0) @binding(1) var<uniform> params: Params;
@compute @workgroup_size(64)
fn main(@builtin(global_invocation_id) id: vec3<u32>) {
let index = id.x;
if (index >= params.count) {
return;
}
var particle = particles[index];
// Apply gravity
particle.velocity.y = particle.velocity.y - 9.81 * params.deltaTime;
// Update position
particle.position = particle.position + particle.velocity * params.deltaTime;
// Bounce off bottom
if (particle.position.y < -1.0) {
particle.position.y = -1.0;
particle.velocity.y = -particle.velocity.y * 0.8;
}
// Bounce off sides
if (abs(particle.position.x) > 1.0) {
particle.position.x = sign(particle.position.x) * 1.0;
particle.velocity.x = -particle.velocity.x * 0.8;
}
particles[index] = particle;
}
`;
const shader = this.device.createShaderModule({
code: particleShaderCode
});
return this.device.createComputePipeline({
layout: 'auto',
compute: {
module: shader,
entryPoint: 'main',
},
});
}
}
// Usage
async function runComputeExample() {
const compute = new ComputeRenderer();
await compute.init();
// Matrix multiplication benchmark
console.time('GPU Matrix Multiplication');
const gpuResult = await compute.matrixMultiplication(512);
console.timeEnd('GPU Matrix Multiplication');
// For comparison, CPU implementation
function cpuMatrixMultiply(A, B, size) {
const result = new Float32Array(size * size);
for (let i = 0; i < size; i++) {
for (let j = 0; j < size; j++) {
let sum = 0;
for (let k = 0; k < size; k++) {
sum += A[i * size + k] * B[k * size + j];
}
result[i * size + j] = sum;
}
}
return result;
}
const { matrixA, matrixB } = compute.createMatrices(512);
console.time('CPU Matrix Multiplication');
const cpuResult = cpuMatrixMultiply(matrixA, matrixB, 512);
console.timeEnd('CPU Matrix Multiplication');
console.log('Compare the two timings above; the CPU run uses freshly generated matrices, so only the timings (not the values) are directly comparable.');
}
if (navigator.gpu) {
runComputeExample().catch((error) => console.error('Compute example failed:', error));
} else {
console.warn('WebGPU is not supported in this browser');
}
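createParticleSimulation builds the compute pipeline but is never driven. Here is a minimal sketch of how it could be wired up, assuming the ComputeRenderer class above; the buffer names, initial particle distribution, and the fixed 1/60 s time step are illustrative choices, not part of the original example.
// Drive the particle compute shader: one dispatch per frame over a storage buffer of particles
async function runParticleSimulation(particleCount = 1024) {
  const compute = new ComputeRenderer();
  await compute.init();
  const device = compute.device;
  const pipeline = await compute.createParticleSimulation();
  // Each Particle is position.xy + velocity.xy = 4 f32s (16 bytes), matching the WGSL struct
  const particleData = new Float32Array(particleCount * 4);
  for (let i = 0; i < particleCount; i++) {
    particleData[i * 4 + 0] = Math.random() * 2 - 1;  // x
    particleData[i * 4 + 1] = Math.random();          // y
    particleData[i * 4 + 2] = Math.random() - 0.5;    // vx
    particleData[i * 4 + 3] = 0;                      // vy
  }
  const particleBuffer = device.createBuffer({
    size: particleData.byteLength,
    usage: GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_DST | GPUBufferUsage.COPY_SRC,
  });
  device.queue.writeBuffer(particleBuffer, 0, particleData);
  // Params { deltaTime: f32, count: u32 } -> 8 bytes used, buffer padded to 16
  const paramsBuffer = device.createBuffer({
    size: 16,
    usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST,
  });
  const params = new ArrayBuffer(8);
  new Float32Array(params, 0, 1)[0] = 1 / 60;        // deltaTime: fixed time step
  new Uint32Array(params, 4, 1)[0] = particleCount;  // count
  device.queue.writeBuffer(paramsBuffer, 0, params);
  const bindGroup = device.createBindGroup({
    layout: pipeline.getBindGroupLayout(0),
    entries: [
      { binding: 0, resource: { buffer: particleBuffer } },
      { binding: 1, resource: { buffer: paramsBuffer } },
    ],
  });
  const step = () => {
    const encoder = device.createCommandEncoder();
    const pass = encoder.beginComputePass();
    pass.setPipeline(pipeline);
    pass.setBindGroup(0, bindGroup);
    // 64 threads per workgroup, matching @workgroup_size(64) in the shader
    pass.dispatchWorkgroups(Math.ceil(particleCount / 64));
    pass.end();
    device.queue.submit([encoder.finish()]);
    requestAnimationFrame(step);
  };
  step();
}
Rendering the simulated particles would take a separate render pipeline that reads particleBuffer as a vertex or storage buffer, which is beyond this sketch.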
💻 3D Scene Rendering javascript
🔴 complex
⭐⭐⭐⭐⭐
Create a complete 3D scene with lighting and camera controls
⏱️ 35 min
🏷️ webgpu, 3d, lighting, scene, graphics
Prerequisites:
JavaScript, WebGPU, 3D graphics, Linear algebra
// WebGPU 3D Scene with Lighting
class Scene3D {
constructor(canvas) {
this.canvas = canvas;
this.device = null;
this.context = null;
this.format = null;
this.pipeline = null;
this.uniformBuffer = null;
this.camera = {
position: [0, 0, 5],
rotation: [0, 0, 0],
fov: Math.PI / 4,
aspect: canvas.width / canvas.height,
near: 0.1,
far: 100
};
}
async init() {
if (!navigator.gpu) throw new Error('WebGPU not supported');
const adapter = await navigator.gpu.requestAdapter();
if (!adapter) throw new Error('No appropriate GPU adapter found');
this.device = await adapter.requestDevice();
this.context = this.canvas.getContext('webgpu');
this.format = navigator.gpu.getPreferredCanvasFormat();
this.context.configure({
device: this.device,
format: this.format,
alphaMode: 'opaque',
});
await this.createPipeline();
await this.createGeometry();
this.createUniforms();
}
async createPipeline() {
// Vertex shader with 3D transformations
const vertexShaderCode = `
struct Uniforms {
modelMatrix: mat4x4<f32>,
viewMatrix: mat4x4<f32>,
projectionMatrix: mat4x4<f32>,
normalMatrix: mat4x4<f32>,
cameraPosition: vec3<f32>,
lightPosition: vec3<f32>,
lightColor: vec3<f32>,
lightIntensity: f32,
}
struct VertexInput {
@location(0) position: vec3<f32>,
@location(1) normal: vec3<f32>,
@location(2) color: vec3<f32>,
}
struct VertexOutput {
@builtin(position) position: vec4<f32>,
@location(0) worldPosition: vec3<f32>,
@location(1) worldNormal: vec3<f32>,
@location(2) color: vec3<f32>,
}
@group(0) @binding(0) var<uniform> uniforms: Uniforms;
@vertex
fn main(input: VertexInput) -> VertexOutput {
var output: VertexOutput;
// Transform position to world space
let worldPos = uniforms.modelMatrix * vec4<f32>(input.position, 1.0);
output.worldPosition = worldPos.xyz;
// Transform normal to world space
output.worldNormal = normalize((uniforms.normalMatrix * vec4<f32>(input.normal, 0.0)).xyz);
// Transform to clip space
output.position = uniforms.projectionMatrix * uniforms.viewMatrix * worldPos;
// Pass through color
output.color = input.color;
return output;
}
`;
// Fragment shader with Phong lighting
const fragmentShaderCode = `
struct Uniforms {
modelMatrix: mat4x4<f32>,
viewMatrix: mat4x4<f32>,
projectionMatrix: mat4x4<f32>,
normalMatrix: mat4x4<f32>,
cameraPosition: vec3<f32>,
lightPosition: vec3<f32>,
lightColor: vec3<f32>,
lightIntensity: f32,
}
@group(0) @binding(0) var<uniform> uniforms: Uniforms;
@fragment
fn main(
@location(0) worldPosition: vec3<f32>,
@location(1) worldNormal: vec3<f32>,
@location(2) color: vec3<f32>
) -> @location(0) vec4<f32> {
// Phong lighting model
// Ambient
let ambient = vec3<f32>(0.1, 0.1, 0.1);
// Diffuse
let lightDir = normalize(uniforms.lightPosition - worldPosition);
let diffuse = max(dot(worldNormal, lightDir), 0.0) * uniforms.lightColor * uniforms.lightIntensity;
// Specular
let viewDir = normalize(uniforms.cameraPosition - worldPosition);
let reflectDir = reflect(-lightDir, worldNormal);
let specular = pow(max(dot(viewDir, reflectDir), 0.0), 32.0) * uniforms.lightColor;
let finalColor = (ambient + diffuse + specular) * color;
return vec4<f32>(finalColor, 1.0);
}
`;
const vertexShader = this.device.createShaderModule({
code: vertexShaderCode
});
const fragmentShader = this.device.createShaderModule({
code: fragmentShaderCode
});
this.pipeline = this.device.createRenderPipeline({
layout: 'auto',
vertex: {
module: vertexShader,
entryPoint: 'main',
buffers: [
{
arrayStride: 3 * 4, // position
attributes: [{ format: 'float32x3', offset: 0, shaderLocation: 0 }]
},
{
arrayStride: 3 * 4, // normal
attributes: [{ format: 'float32x3', offset: 0, shaderLocation: 1 }]
},
{
arrayStride: 3 * 4, // color
attributes: [{ format: 'float32x3', offset: 0, shaderLocation: 2 }]
}
]
},
fragment: {
module: fragmentShader,
entryPoint: 'main',
targets: [{ format: this.format }]
},
primitive: {
topology: 'triangle-list',
cullMode: 'back'
},
depthStencil: {
depthWriteEnabled: true,
depthCompare: 'less',
format: 'depth24plus'
}
});
}
async createGeometry() {
// Create a cube
const positions = new Float32Array([
// Front face
-1, -1, 1, 1, -1, 1, 1, 1, 1, -1, 1, 1,
// Back face
-1, -1, -1, -1, 1, -1, 1, 1, -1, 1, -1, -1,
// Top face
-1, 1, -1, -1, 1, 1, 1, 1, 1, 1, 1, -1,
// Bottom face
-1, -1, -1, 1, -1, -1, 1, -1, 1, -1, -1, 1,
// Right face
1, -1, -1, 1, 1, -1, 1, 1, 1, 1, -1, 1,
// Left face
-1, -1, -1, -1, -1, 1, -1, 1, 1, -1, 1, -1
]);
const normals = new Float32Array([
// Front, Back, Top, Bottom, Right, Left
0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1,
0, 0, -1, 0, 0, -1, 0, 0, -1, 0, 0, -1,
0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0,
0, -1, 0, 0, -1, 0, 0, -1, 0, 0, -1, 0,
1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0,
-1, 0, 0, -1, 0, 0, -1, 0, 0, -1, 0, 0
]);
const colors = new Float32Array([
1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, // Red
0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, // Green
0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, // Blue
1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, // Yellow
1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, // Magenta
0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1 // Cyan
]);
const indices = new Uint16Array([
0, 1, 2, 0, 2, 3, // Front
4, 5, 6, 4, 6, 7, // Back
8, 9, 10, 8, 10, 11, // Top
12, 13, 14, 12, 14, 15, // Bottom
16, 17, 18, 16, 18, 19, // Right
20, 21, 22, 20, 22, 23 // Left
]);
this.positionBuffer = this.device.createBuffer({
size: positions.byteLength,
usage: GPUBufferUsage.VERTEX | GPUBufferUsage.COPY_DST,
});
this.normalBuffer = this.device.createBuffer({
size: normals.byteLength,
usage: GPUBufferUsage.VERTEX | GPUBufferUsage.COPY_DST,
});
this.colorBuffer = this.device.createBuffer({
size: colors.byteLength,
usage: GPUBufferUsage.VERTEX | GPUBufferUsage.COPY_DST,
});
this.indexBuffer = this.device.createBuffer({
size: indices.byteLength,
usage: GPUBufferUsage.INDEX | GPUBufferUsage.COPY_DST,
});
this.device.queue.writeBuffer(this.positionBuffer, 0, positions);
this.device.queue.writeBuffer(this.normalBuffer, 0, normals);
this.device.queue.writeBuffer(this.colorBuffer, 0, colors);
this.device.queue.writeBuffer(this.indexBuffer, 0, indices);
this.indexCount = indices.length;
}
createUniforms() {
// WGSL uniform layout: each vec3 member is aligned to 16 bytes
const uniformSize =
64 + // modelMatrix (4x4)
64 + // viewMatrix (4x4)
64 + // projectionMatrix (4x4)
64 + // normalMatrix (4x4)
16 + // cameraPosition (vec3 + 4 bytes padding)
16 + // lightPosition (vec3 + 4 bytes padding)
12 + // lightColor (vec3)
4; // lightIntensity (f32), packed right after lightColor
this.uniformBuffer = this.device.createBuffer({
size: uniformSize,
usage: GPUBufferUsage.UNIFORM | GPUBufferUsage.COPY_DST,
});
this.bindGroup = this.device.createBindGroup({
layout: this.pipeline.getBindGroupLayout(0),
entries: [{ binding: 0, resource: { buffer: this.uniformBuffer } }]
});
// Create depth texture
this.depthTexture = this.device.createTexture({
size: [this.canvas.width, this.canvas.height],
format: 'depth24plus',
usage: GPUTextureUsage.RENDER_ATTACHMENT,
});
}
// Matrix helper functions
createIdentityMatrix() {
return new Float32Array([
1, 0, 0, 0,
0, 1, 0, 0,
0, 0, 1, 0,
0, 0, 0, 1
]);
}
createTranslationMatrix(x, y, z) {
return new Float32Array([
1, 0, 0, 0,
0, 1, 0, 0,
0, 0, 1, 0,
x, y, z, 1
]);
}
createRotationMatrix(angleX, angleY, angleZ) {
const cosX = Math.cos(angleX), sinX = Math.sin(angleX);
const cosY = Math.cos(angleY), sinY = Math.sin(angleY);
const cosZ = Math.cos(angleZ), sinZ = Math.sin(angleZ);
const rotX = new Float32Array([
1, 0, 0, 0,
0, cosX, sinX, 0,
0, -sinX, cosX, 0,
0, 0, 0, 1
]);
const rotY = new Float32Array([
cosY, 0, -sinY, 0,
0, 1, 0, 0,
sinY, 0, cosY, 0,
0, 0, 0, 1
]);
const rotZ = new Float32Array([
cosZ, sinZ, 0, 0,
-sinZ, cosZ, 0, 0,
0, 0, 1, 0,
0, 0, 0, 1
]);
return this.multiplyMatrices(this.multiplyMatrices(rotX, rotY), rotZ);
}
createPerspectiveMatrix(fov, aspect, near, far) {
// WebGPU clip space uses a [0, 1] depth range (unlike WebGL's [-1, 1])
const f = 1.0 / Math.tan(fov / 2);
const rangeInv = 1 / (near - far);
return new Float32Array([
f / aspect, 0, 0, 0,
0, f, 0, 0,
0, 0, far * rangeInv, -1,
0, 0, near * far * rangeInv, 0
]);
}
multiplyMatrices(a, b) {
const result = new Float32Array(16);
for (let i = 0; i < 4; i++) {
for (let j = 0; j < 4; j++) {
result[i * 4 + j] = 0;
for (let k = 0; k < 4; k++) {
result[i * 4 + j] += a[i * 4 + k] * b[k * 4 + j];
}
}
}
return result;
}
updateUniforms(time) {
const modelMatrix = this.multiplyMatrices(
this.createTranslationMatrix(0, 0, 0),
this.createRotationMatrix(time * 0.5, time * 0.3, time * 0.2)
);
const viewMatrix = this.createIdentityMatrix();
viewMatrix[14] = -this.camera.position[2]; // Move the world back so the camera sits at camera.position
const projectionMatrix = this.createPerspectiveMatrix(
this.camera.fov,
this.camera.aspect,
this.camera.near,
this.camera.far
);
// Layout must match the WGSL Uniforms struct: each vec3 is padded to 16 bytes
const uniformData = new Float32Array([
...modelMatrix,
...viewMatrix,
...projectionMatrix,
...modelMatrix, // Normal matrix (simplified; exact for pure rotations)
...this.camera.position, 0, // Camera position + padding
2, 2, 2, 0, // Light position + padding
1, 1, 1, // Light color
1.0 // Light intensity
]);
this.device.queue.writeBuffer(this.uniformBuffer, 0, uniformData);
}
render(time) {
this.updateUniforms(time);
const commandEncoder = this.device.createCommandEncoder();
const textureView = this.context.getCurrentTexture().createView();
const renderPass = commandEncoder.beginRenderPass({
colorAttachments: [{
view: textureView,
clearValue: { r: 0.1, g: 0.1, b: 0.1, a: 1.0 },
loadOp: 'clear',
storeOp: 'store',
}],
depthStencilAttachment: {
view: this.depthTexture.createView(),
depthClearValue: 1.0,
depthLoadOp: 'clear',
depthStoreOp: 'store',
},
});
renderPass.setPipeline(this.pipeline);
renderPass.setBindGroup(0, this.bindGroup);
renderPass.setVertexBuffer(0, this.positionBuffer);
renderPass.setVertexBuffer(1, this.normalBuffer);
renderPass.setVertexBuffer(2, this.colorBuffer);
renderPass.setIndexBuffer(this.indexBuffer, 'uint16');
renderPass.drawIndexed(this.indexCount);
renderPass.end();
this.device.queue.submit([commandEncoder.finish()]);
}
async start() {
await this.init();
const frame = (time) => {
this.render(time * 0.001); // Convert to seconds
requestAnimationFrame(frame);
};
frame(0);
}
}
// Usage
async function render3DScene() {
const canvas = document.getElementById('canvas');
const scene = new Scene3D(canvas);
try {
await scene.start();
} catch (error) {
console.error('3D scene initialization failed:', error);
}
}
if (navigator.gpu) {
render3DScene();
}
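The description mentions camera controls, which the class leaves as an exercise. Below is a minimal sketch of drag-to-orbit and scroll-to-zoom input, assuming the Scene3D instance above; attachCameraControls and the 0.01 sensitivities are illustrative, not part of the example.
// Hypothetical helper: accumulate mouse input into the scene's camera state
function attachCameraControls(scene, canvas) {
  let dragging = false;
  let lastX = 0;
  let lastY = 0;
  canvas.addEventListener('mousedown', (e) => {
    dragging = true;
    lastX = e.clientX;
    lastY = e.clientY;
  });
  window.addEventListener('mouseup', () => { dragging = false; });
  window.addEventListener('mousemove', (e) => {
    if (!dragging) return;
    // Accumulate yaw/pitch from mouse movement
    scene.camera.rotation[1] += (e.clientX - lastX) * 0.01; // yaw
    scene.camera.rotation[0] += (e.clientY - lastY) * 0.01; // pitch
    lastX = e.clientX;
    lastY = e.clientY;
  });
  canvas.addEventListener('wheel', (e) => {
    e.preventDefault();
    // Dolly the camera along z, clamped so it never enters the cube
    scene.camera.position[2] = Math.max(2, scene.camera.position[2] + e.deltaY * 0.01);
  }, { passive: false });
}
Since updateUniforms derives the view translation from this.camera.position[2], the scroll dolly takes effect immediately; applying the accumulated rotation would additionally mean building the view matrix from camera.rotation (for instance with createRotationMatrix and multiplyMatrices) instead of the identity matrix.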