three#Matrix4 JavaScript Examples
The following examples show how to use
three#Matrix4.
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example. You may check out the related API usage on the sidebar.
Example #1
Source File: utils.js From cga.js with MIT License | 6 votes |
/**
 * Convert a cga.js geometric primitive into a renderable three.js object.
 *
 * @param {Object} obj cga primitive: Point/Vec3, Line, Ray, Segment, Triangle,
 *   Polyline, Polygon, Circle or Disk.
 * @param {Object} materialOption reserved for material overrides — currently
 *   ignored by every branch (TODO: wire it into the material constructors).
 * @returns {Object|null} a three.js Points/Line/Mesh, or null for unhandled
 *   types (note: Polygon is currently not implemented).
 */
export function toMesh(obj, materialOption) {
    var renderObj = null;
    if (obj instanceof cga.Point || obj.isVec3) {
        // Single point drawn as a fixed-size screen-space dot.
        var geometry = new BufferGeometry()
        geometry.setAttribute('position', new Float32BufferAttribute([obj.x, obj.y, obj.z], 3));
        var material = new PointsMaterial({ size: 5, sizeAttenuation: false, color: 0x0ff0f0, alphaTest: 0.9, transparent: true });
        renderObj = new Points(geometry, material);
    } else if (obj instanceof cga.Line) {
        // Infinite line approximated by extending +/-10000 along its direction.
        var geometry = new Geometry()
        var v1 = obj.direction.clone().multiplyScalar(10000).add(obj.origin);
        var v2 = obj.direction.clone().multiplyScalar(-10000).add(obj.origin);
        geometry.vertices.push(v1, v2);
        var material = new LineBasicMaterial({ color: 0xffff8f });
        renderObj = new Line(geometry, material);
    } else if (obj instanceof cga.Ray) {
        // Ray drawn from its origin, extended 10000 units along its direction.
        var geometry = new Geometry()
        var v1 = obj.direction.clone().multiplyScalar(10000).add(obj.origin);
        geometry.vertices.push(obj.origin, v1);
        var material = new LineBasicMaterial({ color: 0xff8fff });
        renderObj = new Line(geometry, material);
    } else if (obj instanceof cga.Segment) {
        var geometry = new Geometry()
        geometry.vertices.push(obj.p0, obj.p1);
        var material = new LineBasicMaterial({ color: 0x8fffff });
        renderObj = new Line(geometry, material);
    } else if (obj instanceof cga.Triangle) {
        var geometry = new Geometry()
        geometry.vertices = [...obj];
        geometry.faces.push(new Face3(0, 1, 2))
        var material = new MeshBasicMaterial({ color: 0x8f8fff, side: DoubleSide });
        renderObj = new Mesh(geometry, material);
    } else if (obj instanceof cga.Polyline) {
        var geometry = new Geometry()
        geometry.vertices.push(...obj);
        var material = new LineBasicMaterial({ color: 0xff8fff });
        renderObj = new Line(geometry, material);
    } else if (obj instanceof cga.Polygon) {
        // TODO: Polygon rendering is not implemented; falls through to null.
    } else if (obj instanceof cga.Circle) {
        // Circle outline: 128 segments built in the XY plane, then rotated so
        // the plane normal matches obj.normal and translated to the center.
        var geometry = new Geometry()
        var radius = obj.radius;
        for (let i = 0; i <= 128; i++) {
            var p = new Vector3();
            p.x = radius * Math.cos(Math.PI / 64 * i);
            p.y = radius * Math.sin(Math.PI / 64 * i);
            geometry.vertices.push(p);
        }
        var quaternion = getQuaternionForm2V(new Vector3(0, 0, 1), obj.normal);
        var mat4 = new Matrix4();
        mat4.makeRotationFromQuaternion(quaternion);
        // Fix: use applyMatrix4 as the Disk branch already does; applyMatrix is
        // the deprecated pre-r110 name for the same operation.
        geometry.applyMatrix4(mat4);
        geometry.translate(obj.center.x, obj.center.y, obj.center.z);
        var material = new LineBasicMaterial({ color: 0x8fffff });
        renderObj = new Line(geometry, material);
        // Fix: toMesh is a plain factory function — calling it with `new` only
        // "worked" because a constructor that returns an object discards
        // `this`. Call it directly to attach the center/normal helpers.
        renderObj.add(toMesh(obj.center))
        renderObj.add(toMesh(new cga.Ray(obj.center, obj.normal)))
    } else if (obj instanceof cga.Disk) {
        // Filled disk: built-in CircleGeometry oriented and placed as above.
        var geometry = new CircleGeometry(obj.radius, 128)
        var material = new MeshBasicMaterial({ color: 0x8f8fff, side: DoubleSide });
        var quaternion = getQuaternionForm2V(new Vector3(0, 0, 1), obj.normal);
        var mat4 = new Matrix4();
        mat4.makeRotationFromQuaternion(quaternion);
        geometry.applyMatrix4(mat4);
        geometry.translate(obj.center.x, obj.center.y, obj.center.z);
        renderObj = new Mesh(geometry, material);
        renderObj.add(toMesh(obj.center))
        renderObj.add(toMesh(new cga.Ray(obj.center, obj.normal)))
    }
    return renderObj;
}
Example #2
Source File: DragControls.js From geometry_3d with MIT License | 6 votes |
constructor(_objects, _camera) {
this._objects = _objects;
this._camera = _camera;
this._plane = new Plane();
this._raycaster = new Raycaster();
_mouse = new Vector2();
this._offset = new Vector3();
this._intersection = new Vector3();
this._worldPosition = new Vector3();
this._inverseMatrix = new Matrix4();
this._intersections = [];
this._selected = null;
}
Example #3
Source File: half_edge.js From architect3d with MIT License | 6 votes |
/**
* Calculate the transformation matrix for the edge (front/back) based on the parameters.
* @param {Matrix4} transform The matrix reference in which the transformation is stored
* @param {Matrix4} invTransform The inverse of the transform that is stored in the invTransform
* @param {Vector2} start The starting point location
* @param {Vector2} end The ending point location
* @see https://threejs.org/docs/#api/en/math/Matrix4
* @see https://threejs.org/docs/#api/en/math/Vector2
*/
/**
 * Compute the edge-local transformation (and its inverse) that maps world
 * space into a frame whose origin is `start` and whose +X axis points from
 * `start` toward `end` in the plan (x/z) view.
 *
 * @param {Matrix4} transform receives the combined rotation * translation.
 * @param {Matrix4} invTransform receives the inverse of `transform`.
 * @param {Vector2} start the starting point location.
 * @param {Vector2} end the ending point location.
 */
computeTransforms(transform, invTransform, start, end)
{
    // Angle between +X and the edge direction, measured in 2D.
    var edgeDir = new Vector2(end.x - start.x, end.y - start.y);
    var angle = Utils.angle(new Vector2(1, 0), edgeDir);
    // First move the start point to the origin, then undo the edge rotation.
    var translation = new Matrix4().makeTranslation(-start.x, 0, -start.y);
    var rotation = new Matrix4().makeRotationY(-angle);
    transform.multiplyMatrices(rotation, translation);
    invTransform.getInverse(transform);
}
Example #4
Source File: roof_item.js From architect3d with MIT License | 6 votes |
/**
 * Item mounted to the ceiling/roof: disables rotation and floor snapping,
 * re-centers its geometry, then moves itself to the closest ceiling point.
 * All parameters are forwarded unchanged to the parent Item constructor.
 */
constructor(model, metadata, geometry, material, position, rotation, scale, isgltf=false)
{
super(model, metadata, geometry, material, position, rotation, scale, isgltf);
// Roof items keep a fixed orientation and are not bound to the floor plane.
this.allowRotate = false;
this.boundToFloor = false;
this._freePosition = false;
if(this.geometry)
{
// Shift the geometry by minus half its bounding-box extent to re-center it
// around the local origin. NOTE(review): the Y component uses (max.y - min.y)
// while X and Z use (max + min) — confirm this asymmetry is intentional
// (it offsets the item vertically, presumably for ceiling anchoring).
this.geometry.applyMatrix(new Matrix4().makeTranslation(-0.5 * (this.geometry.boundingBox.max.x + this.geometry.boundingBox.min.x), -0.5 * (this.geometry.boundingBox.max.y - this.geometry.boundingBox.min.y),-0.5 * (this.geometry.boundingBox.max.z + this.geometry.boundingBox.min.z)));
// The bounding box must be recomputed after the in-place translation.
this.geometry.computeBoundingBox();
}
this.halfSize = this.objectHalfSize();
// Position the helper canvas planes relative to the item's extents.
this.canvasPlaneWH.position.set(0, this.getHeight() * -0.5, this.getDepth()*0.5);
this.canvasPlaneWD.position.set(0, -this.getHeight(), 0);
// Snap the item to the nearest point on the ceiling.
var co = this.closestCeilingPoint();
this.moveToPosition(co);
}
Example #5
Source File: item.js From architect3d with MIT License | 6 votes |
/**
* returns the 2d corners of the bounding polygon
*
* position is an optional Vector3 (used for getting corners of the object at a new position)
*
* TODO: handle rotated objects better!
*/
getCorners(xDim, yDim, position)
{
position = position || this.position;
var halfSize = this.halfSize.clone();
var c1 = new Vector3(-halfSize.x, 0, -halfSize.z);
var c2 = new Vector3(halfSize.x, 0, -halfSize.z);
var c3 = new Vector3(halfSize.x, 0, halfSize.z);
var c4 = new Vector3(-halfSize.x, 0, halfSize.z);
var transform = new Matrix4();
// console.log(this.rotation.y);
transform.makeRotationY(this.rotation.y); // + Math.PI/2)
c1.applyMatrix4(transform);
c2.applyMatrix4(transform);
c3.applyMatrix4(transform);
c4.applyMatrix4(transform);
c1.add(position);
c2.add(position);
c3.add(position);
c4.add(position);
// halfSize.applyMatrix4(transform);
// var min = position.clone().sub(halfSize);
// var max = position.clone().add(halfSize);
var corners = [{ x: c1.x, y: c1.z },{ x: c2.x, y: c2.z },{ x: c3.x, y: c3.z },{ x: c4.x, y: c4.z }];
return corners;
}
Example #6
Source File: CombinedCamera.js From BlueMapWeb with MIT License | 5 votes |
/**
 * Blend a perspective and an orthographic projection according to
 * `this.ortho` (0 = fully perspective, 1 = fully orthographic) and store the
 * result in this.projectionMatrix / this.projectionMatrixInverse.
 */
updateProjectionMatrix() {
// Lazily create the scratch matrices and data holder on first call.
if (!this.ortographicProjection)
this.ortographicProjection = new Matrix4();
if (!this.perspectiveProjection)
this.perspectiveProjection = new Matrix4();
if (!this.data)
this.data = {};
//copied from PerspectiveCamera
const near = this.near;
let top = near * Math.tan( MathUtils.DEG2RAD * 0.5 * this.fov ) / this.zoom;
let height = 2 * top;
let width = this.aspect * height;
let left = - 0.5 * width;
const view = this.view;
if ( this.view !== null && this.view.enabled ) {
const fullWidth = view.fullWidth,
fullHeight = view.fullHeight;
left += view.offsetX * width / fullWidth;
top -= view.offsetY * height / fullHeight;
width *= view.width / fullWidth;
height *= view.height / fullHeight;
}
const skew = this.filmOffset;
if ( skew !== 0 ) left += near * skew / this.getFilmWidth();
// this part different to PerspectiveCamera
// Ease the blend factor so mid-range `ortho` values stay mostly perspective.
let normalizedOrtho = -Math.pow(this.ortho - 1, 6) + 1;
// Size the ortho frustum to match the perspective view at `this.distance`.
let orthoTop = Math.max(this.distance, 0.0001) * Math.tan( MathUtils.DEG2RAD * 0.5 * this.fov ) / this.zoom;
let orthoHeight = 2 * orthoTop;
let orthoWidth = this.aspect * orthoHeight;
let orthoLeft = - 0.5 * orthoWidth;
this.perspectiveProjection.makePerspective( left, left + width, top, top - height, near, this.far );
this.ortographicProjection.makeOrthographic( orthoLeft, orthoLeft + orthoWidth, orthoTop, orthoTop - orthoHeight, near, this.far );
// Element-wise lerp between the two projections. NOTE(review): blending raw
// matrix elements is not an exact projection mid-transition, but it appears
// to be this camera's intended perspective<->ortho morph.
for (let i = 0; i < 16; i++){
this.projectionMatrix.elements[i] = (this.perspectiveProjection.elements[i] * (1 - normalizedOrtho)) + (this.ortographicProjection.elements[i] * normalizedOrtho);
}
// to here
this.projectionMatrixInverse.copy( this.projectionMatrix ).invert();
}
Example #7
Source File: Glasses.js From sketch-webcam with MIT License | 5 votes |
/**
 * Update the glasses' rotation, position and scale from a face-mesh
 * prediction.
 * @param {Object} prediction face-mesh result with `mesh` (model-space) and
 *   `scaledMesh` (pixel-space) landmark arrays.
 */
update(prediction) {
const { mesh, scaledMesh } = prediction;
const { resolution } = store.state.webcam;
// Landmark 5 re-centered so (0,0) is the image center.
// NOTE(review): presumably the nose bridge — confirm against the mesh spec.
const p0 = new Vector3();
p0.fromArray(scaledMesh[5]);
p0.x = p0.x - resolution.x * 0.5;
p0.y = p0.y - resolution.y * 0.5;
const p1 = new Vector3();
const p2 = new Vector3();
const p3 = new Vector3();
p1.fromArray(mesh[5]);
p2.fromArray(mesh[44]);
p3.fromArray(mesh[274]);
// Build an orthonormal basis from three landmarks: x spans the face
// horizontally, then y2/z2 are re-orthogonalized via cross products.
const x = p3
.clone()
.sub(p2)
.normalize();
const y = p1
.clone()
.sub(p2)
.normalize();
const z = new Vector3().crossVectors(x, y);
const y2 = new Vector3().crossVectors(x, z).normalize();
const z2 = new Vector3().crossVectors(x, y2).normalize();
const rotateMat = new Matrix4().makeBasis(x, y2, z2);
this.rotation.setFromRotationMatrix(rotateMat);
// Map the pixel-space anchor into scene coordinates (flipped axes, scaled
// by this.size / this.imgRatio); z is derived from the view ray direction.
const normal = p0.clone().normalize();
const x3 = ((p0.x / -resolution.x) * this.size.x) / this.imgRatio.x;
const y3 = (((p0.y + 10) / -resolution.y) * this.size.y) / this.imgRatio.y;
const z3 = normal.z * (x3 / normal.x) - 2;
this.anchor.set(x3, y3, z3);
// Damped-spring step toward the anchor: accelerate 40% of the remaining
// offset, then bleed 40% off the accumulated velocity `this.a`.
const a = this.anchor
.clone()
.sub(this.position)
.multiplyScalar(0.4);
this.a.add(a);
this.a.add(this.a.clone().multiplyScalar(-0.4));
this.position.add(this.a);
// Scale from the distance between landmarks 10 and 152 (top of head /
// chin, presumably — TODO confirm), smoothed the same damped way and
// clamped via the sa accumulator.
const p4 = new Vector3().fromArray(scaledMesh[10]);
const x4 = ((p4.x / -resolution.x) * this.size.x) / this.imgRatio.x;
const y4 = ((p4.y / -resolution.y) * this.size.y) / this.imgRatio.y;
const z4 = normal.z * (x4 / normal.x);
const p4a = new Vector3(x4, y4, z4);
const p5 = new Vector3().fromArray(scaledMesh[152]);
const x5 = ((p5.x / -resolution.x) * this.size.x) / this.imgRatio.x;
const y5 = ((p5.y / -resolution.y) * this.size.y) / this.imgRatio.y;
const z5 = normal.z * (x5 / normal.x);
const p5a = new Vector3(x5, y5, z5);
const sv = p4a.distanceTo(p5a) / 40;
this.sa += (sv - this.sv) * 0.1;
this.sa += this.sa * -0.4;
this.sa = Math.min(this.sa, 1);
this.sv += this.sa;
this.scale.set(this.sv, this.sv, this.sv);
}
Example #8
Source File: CSS3DRenderer.js From Computer-Graphics with MIT License | 5 votes |
// Scratch Matrix4 field — presumably reused across render calls to avoid
// per-frame allocation; TODO confirm against its call sites.
_matrix2 = new Matrix4()
Example #9
Source File: CSS3DRenderer.js From Computer-Graphics with MIT License | 5 votes |
// Scratch Matrix4 field — presumably reused across render calls to avoid
// per-frame allocation; TODO confirm against its call sites.
_matrix = new Matrix4()
Example #10
Source File: DragControls.js From Computer-Graphics with MIT License | 5 votes |
// Scratch Matrix4 field — by its name, holds an inverse (likely of the
// dragged object's parent world matrix); verify against usage.
_inverseMatrix = new Matrix4()
Example #11
Source File: raycastTraverse.js From 3DTilesRendererJS with Apache License 2.0 | 5 votes |
// Module-level scratch Matrix4 shared by raycast traversal to avoid
// allocating per call — presumably; TODO confirm usage.
_mat = new Matrix4()
Example #12
Source File: TilesRenderer.js From 3DTilesRendererJS with Apache License 2.0 | 5 votes |
// Scratch Matrix4 — reusable temporary; TODO confirm call sites.
tempMat2 = new Matrix4()
Example #13
Source File: TilesGroup.js From 3DTilesRendererJS with Apache License 2.0 | 5 votes |
// Scratch Matrix4 — reusable temporary; TODO confirm call sites.
tempMat = new Matrix4()
Example #14
Source File: B3DMLoader.js From 3DTilesRendererJS with Apache License 2.0 | 5 votes |
constructor( manager = DefaultLoadingManager ) {
super();
this.manager = manager;
this.adjustmentTransform = new Matrix4();
}
Example #15
Source File: CMPTLoader.js From 3DTilesRendererJS with Apache License 2.0 | 5 votes |
constructor( manager = DefaultLoadingManager ) {
super();
this.manager = manager;
this.adjustmentTransform = new Matrix4();
}
Example #16
Source File: I3DMLoader.js From 3DTilesRendererJS with Apache License 2.0 | 5 votes |
constructor( manager = DefaultLoadingManager ) {
super();
this.manager = manager;
this.adjustmentTransform = new Matrix4();
}
Example #17
Source File: I3DMLoader.js From 3DTilesRendererJS with Apache License 2.0 | 5 votes |
// Scratch Matrix4 — reusable temporary; TODO confirm call sites.
tempMat = new Matrix4()
Example #18
Source File: TilesRenderer.js From 3DTilesRendererJS with Apache License 2.0 | 5 votes |
// Scratch Matrix4 — reusable temporary; TODO confirm call sites.
tempMat = new Matrix4()
Example #19
Source File: SSRShader.js From Computer-Graphics with MIT License | 4 votes |
// Screen-space reflections shader: ray-marches each fragment's reflection
// direction through the depth buffer and outputs the reflected color with an
// opacity shaped by distance attenuation and a Fresnel term.
SSRShader = {
// Compile-time switches injected as #define's by the ShaderMaterial.
defines: {
MAX_STEP: 0,
PERSPECTIVE_CAMERA: true,
DISTANCE_ATTENUATION: true,
FRESNEL: true,
INFINITE_THICK: false,
SELECTIVE: false,
},
// Inputs: G-buffer textures, camera parameters and march tuning values.
uniforms: {
'tDiffuse': { value: null },
'tNormal': { value: null },
'tMetalness': { value: null },
'tDepth': { value: null },
'cameraNear': { value: null },
'cameraFar': { value: null },
'resolution': { value: new Vector2() },
'cameraProjectionMatrix': { value: new Matrix4() },
'cameraInverseProjectionMatrix': { value: new Matrix4() },
'opacity': { value: .5 },
'maxDistance': { value: 180 },
'cameraRange': { value: 0 },
'thickness': { value: .018 }
},
// Pass-through full-screen-quad vertex stage.
vertexShader: /* glsl */`
varying vec2 vUv;
void main() {
	vUv = uv;
	gl_Position = projectionMatrix * modelViewMatrix * vec4( position, 1.0 );
}
`,
// Ray-marching fragment stage (GLSL string — do not edit as JS).
fragmentShader: /* glsl */`
		// precision highp float;
		precision highp sampler2D;
		varying vec2 vUv;
		uniform sampler2D tDepth;
		uniform sampler2D tNormal;
		uniform sampler2D tMetalness;
		uniform sampler2D tDiffuse;
		uniform float cameraRange;
		uniform vec2 resolution;
		uniform float opacity;
		uniform float cameraNear;
		uniform float cameraFar;
		uniform float maxDistance;
		uniform float thickness;
		uniform mat4 cameraProjectionMatrix;
		uniform mat4 cameraInverseProjectionMatrix;
		#include <packing>
		float pointToLineDistance(vec3 x0, vec3 x1, vec3 x2) {
			//x0: point, x1: linePointA, x2: linePointB
			//https://mathworld.wolfram.com/Point-LineDistance3-Dimensional.html
			return length(cross(x0-x1,x0-x2))/length(x2-x1);
		}
		float pointPlaneDistance(vec3 point,vec3 planePoint,vec3 planeNormal){
			// https://mathworld.wolfram.com/Point-PlaneDistance.html
			//// https://en.wikipedia.org/wiki/Plane_(geometry)
			//// http://paulbourke.net/geometry/pointlineplane/
			float a=planeNormal.x,b=planeNormal.y,c=planeNormal.z;
			float x0=point.x,y0=point.y,z0=point.z;
			float x=planePoint.x,y=planePoint.y,z=planePoint.z;
			float d=-(a*x+b*y+c*z);
			float distance=(a*x0+b*y0+c*z0+d)/sqrt(a*a+b*b+c*c);
			return distance;
		}
		float getDepth( const in vec2 uv ) {
			return texture2D( tDepth, uv ).x;
		}
		float getViewZ( const in float depth ) {
			#ifdef PERSPECTIVE_CAMERA
				return perspectiveDepthToViewZ( depth, cameraNear, cameraFar );
			#else
				return orthographicDepthToViewZ( depth, cameraNear, cameraFar );
			#endif
		}
		vec3 getViewPosition( const in vec2 uv, const in float depth/*clip space*/, const in float clipW ) {
			vec4 clipPosition = vec4( ( vec3( uv, depth ) - 0.5 ) * 2.0, 1.0 );//ndc
			clipPosition *= clipW; //clip
			return ( cameraInverseProjectionMatrix * clipPosition ).xyz;//view
		}
		vec3 getViewNormal( const in vec2 uv ) {
			return unpackRGBToNormal( texture2D( tNormal, uv ).xyz );
		}
		vec2 viewPositionToXY(vec3 viewPosition){
			vec2 xy;
			vec4 clip=cameraProjectionMatrix*vec4(viewPosition,1);
			xy=clip.xy;//clip
			float clipW=clip.w;
			xy/=clipW;//NDC
			xy=(xy+1.)/2.;//uv
			xy*=resolution;//screen
			return xy;
		}
		void main(){
			#ifdef SELECTIVE
				float metalness=texture2D(tMetalness,vUv).r;
				if(metalness==0.) return;
			#endif
			float depth = getDepth( vUv );
			float viewZ = getViewZ( depth );
			if(-viewZ>=cameraFar) return;
			float clipW = cameraProjectionMatrix[2][3] * viewZ+cameraProjectionMatrix[3][3];
			vec3 viewPosition=getViewPosition( vUv, depth, clipW );
			vec2 d0=gl_FragCoord.xy;
			vec2 d1;
			vec3 viewNormal=getViewNormal( vUv );
			#ifdef PERSPECTIVE_CAMERA
				vec3 viewIncidentDir=normalize(viewPosition);
				vec3 viewReflectDir=reflect(viewIncidentDir,viewNormal);
			#else
				vec3 viewIncidentDir=vec3(0,0,-1);
				vec3 viewReflectDir=reflect(viewIncidentDir,viewNormal);
			#endif
			float maxReflectRayLen=maxDistance/dot(-viewIncidentDir,viewNormal);
			// dot(a,b)==length(a)*length(b)*cos(theta) // https://www.mathsisfun.com/algebra/vectors-dot-product.html
			// if(a.isNormalized&&b.isNormalized) dot(a,b)==cos(theta)
			// maxDistance/maxReflectRayLen=cos(theta)
			// maxDistance/maxReflectRayLen==dot(a,b)
			// maxReflectRayLen==maxDistance/dot(a,b)
			vec3 d1viewPosition=viewPosition+viewReflectDir*maxReflectRayLen;
			#ifdef PERSPECTIVE_CAMERA
				if(d1viewPosition.z>-cameraNear){
					//https://tutorial.math.lamar.edu/Classes/CalcIII/EqnsOfLines.aspx
					float t=(-cameraNear-viewPosition.z)/viewReflectDir.z;
					d1viewPosition=viewPosition+viewReflectDir*t;
				}
			#endif
			d1=viewPositionToXY(d1viewPosition);
			float totalLen=length(d1-d0);
			float xLen=d1.x-d0.x;
			float yLen=d1.y-d0.y;
			float totalStep=max(abs(xLen),abs(yLen));
			float xSpan=xLen/totalStep;
			float ySpan=yLen/totalStep;
			for(float i=0.;i<float(MAX_STEP);i++){
				if(i>=totalStep) break;
				vec2 xy=vec2(d0.x+i*xSpan,d0.y+i*ySpan);
				if(xy.x<0.||xy.x>resolution.x||xy.y<0.||xy.y>resolution.y) break;
				float s=length(xy-d0)/totalLen;
				vec2 uv=xy/resolution;
				float d = getDepth(uv);
				float vZ = getViewZ( d );
				if(-vZ>=cameraFar) continue;
				float cW = cameraProjectionMatrix[2][3] * vZ+cameraProjectionMatrix[3][3];
				vec3 vP=getViewPosition( uv, d, cW );
				#ifdef PERSPECTIVE_CAMERA
					// https://comp.nus.edu.sg/~lowkl/publications/lowk_persp_interp_techrep.pdf
					float recipVPZ=1./viewPosition.z;
					float viewReflectRayZ=1./(recipVPZ+s*(1./d1viewPosition.z-recipVPZ));
				#else
					float viewReflectRayZ=viewPosition.z+s*(d1viewPosition.z-viewPosition.z);
				#endif
				// if(viewReflectRayZ>vZ) continue; // will cause "npm run make-screenshot webgl_postprocessing_ssr" high probability hang.
				// https://github.com/mrdoob/three.js/pull/21539#issuecomment-821061164
				if(viewReflectRayZ<=vZ){
					bool hit;
					#ifdef INFINITE_THICK
						hit=true;
					#else
						float away=pointToLineDistance(vP,viewPosition,d1viewPosition);
						float minThickness;
						vec2 xyNeighbor=xy;
						xyNeighbor.x+=1.;
						vec2 uvNeighbor=xyNeighbor/resolution;
						vec3 vPNeighbor=getViewPosition(uvNeighbor,d,cW);
						minThickness=vPNeighbor.x-vP.x;
						minThickness*=3.;
						float tk=max(minThickness,thickness);
						hit=away<=tk;
					#endif
					if(hit){
						vec3 vN=getViewNormal( uv );
						if(dot(viewReflectDir,vN)>=0.) continue;
						float distance=pointPlaneDistance(vP,viewPosition,viewNormal);
						if(distance>maxDistance) break;
						float op=opacity;
						#ifdef DISTANCE_ATTENUATION
							float ratio=1.-(distance/maxDistance);
							float attenuation=ratio*ratio;
							op=opacity*attenuation;
						#endif
						#ifdef FRESNEL
							float fresnelCoe=(dot(viewIncidentDir,viewReflectDir)+1.)/2.;
							op*=fresnelCoe;
						#endif
						vec4 reflectColor=texture2D(tDiffuse,uv);
						gl_FragColor.xyz=reflectColor.xyz;
						gl_FragColor.a=op;
						break;
					}
				}
			}
		}
	`
}
Example #20
Source File: SSAOShader.js From Computer-Graphics with MIT License | 4 votes |
// Screen-space ambient occlusion shader: samples a hemisphere kernel around
// each fragment's view position and accumulates occlusion from depth-buffer
// comparisons.
SSAOShader = {
// Compile-time configuration injected as #define's.
defines: {
'PERSPECTIVE_CAMERA': 1,
'KERNEL_SIZE': 32
},
// G-buffer inputs, sample kernel/noise, camera parameters and AO tuning.
uniforms: {
'tDiffuse': { value: null },
'tNormal': { value: null },
'tDepth': { value: null },
'tNoise': { value: null },
'kernel': { value: null },
'cameraNear': { value: null },
'cameraFar': { value: null },
'resolution': { value: new Vector2() },
'cameraProjectionMatrix': { value: new Matrix4() },
'cameraInverseProjectionMatrix': { value: new Matrix4() },
'kernelRadius': { value: 8 },
'minDistance': { value: 0.005 },
'maxDistance': { value: 0.05 },
},
// Pass-through full-screen-quad vertex stage.
vertexShader: /* glsl */`
		varying vec2 vUv;
		void main() {
			vUv = uv;
			gl_Position = projectionMatrix * modelViewMatrix * vec4( position, 1.0 );
		}`,
// Occlusion-accumulation fragment stage (GLSL string — do not edit as JS).
fragmentShader: /* glsl */`
		uniform sampler2D tDiffuse;
		uniform sampler2D tNormal;
		uniform sampler2D tDepth;
		uniform sampler2D tNoise;
		uniform vec3 kernel[ KERNEL_SIZE ];
		uniform vec2 resolution;
		uniform float cameraNear;
		uniform float cameraFar;
		uniform mat4 cameraProjectionMatrix;
		uniform mat4 cameraInverseProjectionMatrix;
		uniform float kernelRadius;
		uniform float minDistance; // avoid artifacts caused by neighbour fragments with minimal depth difference
		uniform float maxDistance; // avoid the influence of fragments which are too far away
		varying vec2 vUv;
		#include <packing>
		float getDepth( const in vec2 screenPosition ) {
			return texture2D( tDepth, screenPosition ).x;
		}
		float getLinearDepth( const in vec2 screenPosition ) {
			#if PERSPECTIVE_CAMERA == 1
				float fragCoordZ = texture2D( tDepth, screenPosition ).x;
				float viewZ = perspectiveDepthToViewZ( fragCoordZ, cameraNear, cameraFar );
				return viewZToOrthographicDepth( viewZ, cameraNear, cameraFar );
			#else
				return texture2D( tDepth, screenPosition ).x;
			#endif
		}
		float getViewZ( const in float depth ) {
			#if PERSPECTIVE_CAMERA == 1
				return perspectiveDepthToViewZ( depth, cameraNear, cameraFar );
			#else
				return orthographicDepthToViewZ( depth, cameraNear, cameraFar );
			#endif
		}
		vec3 getViewPosition( const in vec2 screenPosition, const in float depth, const in float viewZ ) {
			float clipW = cameraProjectionMatrix[2][3] * viewZ + cameraProjectionMatrix[3][3];
			vec4 clipPosition = vec4( ( vec3( screenPosition, depth ) - 0.5 ) * 2.0, 1.0 );
			clipPosition *= clipW; // unprojection.
			return ( cameraInverseProjectionMatrix * clipPosition ).xyz;
		}
		vec3 getViewNormal( const in vec2 screenPosition ) {
			return unpackRGBToNormal( texture2D( tNormal, screenPosition ).xyz );
		}
		void main() {
			float depth = getDepth( vUv );
			float viewZ = getViewZ( depth );
			vec3 viewPosition = getViewPosition( vUv, depth, viewZ );
			vec3 viewNormal = getViewNormal( vUv );
			vec2 noiseScale = vec2( resolution.x / 4.0, resolution.y / 4.0 );
			vec3 random = vec3( texture2D( tNoise, vUv * noiseScale ).r );
			// compute matrix used to reorient a kernel vector
			vec3 tangent = normalize( random - viewNormal * dot( random, viewNormal ) );
			vec3 bitangent = cross( viewNormal, tangent );
			mat3 kernelMatrix = mat3( tangent, bitangent, viewNormal );
			float occlusion = 0.0;
			for ( int i = 0; i < KERNEL_SIZE; i ++ ) {
				vec3 sampleVector = kernelMatrix * kernel[ i ]; // reorient sample vector in view space
				vec3 samplePoint = viewPosition + ( sampleVector * kernelRadius ); // calculate sample point
				vec4 samplePointNDC = cameraProjectionMatrix * vec4( samplePoint, 1.0 ); // project point and calculate NDC
				samplePointNDC /= samplePointNDC.w;
				vec2 samplePointUv = samplePointNDC.xy * 0.5 + 0.5; // compute uv coordinates
				float realDepth = getLinearDepth( samplePointUv ); // get linear depth from depth texture
				float sampleDepth = viewZToOrthographicDepth( samplePoint.z, cameraNear, cameraFar ); // compute linear depth of the sample view Z value
				float delta = sampleDepth - realDepth;
				if ( delta > minDistance && delta < maxDistance ) { // if fragment is before sample point, increase occlusion
					occlusion += 1.0;
				}
			}
			occlusion = clamp( occlusion / float( KERNEL_SIZE ), 0.0, 1.0 );
			gl_FragColor = vec4( vec3( 1.0 - occlusion ), 1.0 );
		}`
}
Example #21
Source File: SAOShader.js From Computer-Graphics with MIT License | 4 votes |
// Scalable ambient obscurance shader: estimates occlusion from a spiral of
// depth samples around each fragment and darkens the diffuse color.
SAOShader = {
// Compile-time configuration injected as #define's.
defines: {
'NUM_SAMPLES': 7,
'NUM_RINGS': 4,
'NORMAL_TEXTURE': 0,
'DIFFUSE_TEXTURE': 0,
'DEPTH_PACKING': 1,
'PERSPECTIVE_CAMERA': 1
},
// Depth/diffuse/normal inputs, camera parameters and AO tuning values.
uniforms: {
'tDepth': { value: null },
'tDiffuse': { value: null },
'tNormal': { value: null },
'size': { value: new Vector2( 512, 512 ) },
'cameraNear': { value: 1 },
'cameraFar': { value: 100 },
'cameraProjectionMatrix': { value: new Matrix4() },
'cameraInverseProjectionMatrix': { value: new Matrix4() },
'scale': { value: 1.0 },
'intensity': { value: 0.1 },
'bias': { value: 0.5 },
'minResolution': { value: 0.0 },
'kernelRadius': { value: 100.0 },
'randomSeed': { value: 0.0 }
},
// Pass-through full-screen-quad vertex stage.
vertexShader: /* glsl */`
		varying vec2 vUv;
		void main() {
			vUv = uv;
			gl_Position = projectionMatrix * modelViewMatrix * vec4( position, 1.0 );
		}`,
// Obscurance-estimation fragment stage (GLSL string — do not edit as JS).
fragmentShader: /* glsl */`
		#include <common>
		varying vec2 vUv;
		#if DIFFUSE_TEXTURE == 1
		uniform sampler2D tDiffuse;
		#endif
		uniform sampler2D tDepth;
		#if NORMAL_TEXTURE == 1
		uniform sampler2D tNormal;
		#endif
		uniform float cameraNear;
		uniform float cameraFar;
		uniform mat4 cameraProjectionMatrix;
		uniform mat4 cameraInverseProjectionMatrix;
		uniform float scale;
		uniform float intensity;
		uniform float bias;
		uniform float kernelRadius;
		uniform float minResolution;
		uniform vec2 size;
		uniform float randomSeed;
		// RGBA depth
		#include <packing>
		vec4 getDefaultColor( const in vec2 screenPosition ) {
			#if DIFFUSE_TEXTURE == 1
				return texture2D( tDiffuse, vUv );
			#else
				return vec4( 1.0 );
			#endif
		}
		float getDepth( const in vec2 screenPosition ) {
			#if DEPTH_PACKING == 1
				return unpackRGBAToDepth( texture2D( tDepth, screenPosition ) );
			#else
				return texture2D( tDepth, screenPosition ).x;
			#endif
		}
		float getViewZ( const in float depth ) {
			#if PERSPECTIVE_CAMERA == 1
				return perspectiveDepthToViewZ( depth, cameraNear, cameraFar );
			#else
				return orthographicDepthToViewZ( depth, cameraNear, cameraFar );
			#endif
		}
		vec3 getViewPosition( const in vec2 screenPosition, const in float depth, const in float viewZ ) {
			float clipW = cameraProjectionMatrix[2][3] * viewZ + cameraProjectionMatrix[3][3];
			vec4 clipPosition = vec4( ( vec3( screenPosition, depth ) - 0.5 ) * 2.0, 1.0 );
			clipPosition *= clipW; // unprojection.
			return ( cameraInverseProjectionMatrix * clipPosition ).xyz;
		}
		vec3 getViewNormal( const in vec3 viewPosition, const in vec2 screenPosition ) {
			#if NORMAL_TEXTURE == 1
				return unpackRGBToNormal( texture2D( tNormal, screenPosition ).xyz );
			#else
				return normalize( cross( dFdx( viewPosition ), dFdy( viewPosition ) ) );
			#endif
		}
		float scaleDividedByCameraFar;
		float minResolutionMultipliedByCameraFar;
		float getOcclusion( const in vec3 centerViewPosition, const in vec3 centerViewNormal, const in vec3 sampleViewPosition ) {
			vec3 viewDelta = sampleViewPosition - centerViewPosition;
			float viewDistance = length( viewDelta );
			float scaledScreenDistance = scaleDividedByCameraFar * viewDistance;
			return max(0.0, (dot(centerViewNormal, viewDelta) - minResolutionMultipliedByCameraFar) / scaledScreenDistance - bias) / (1.0 + pow2( scaledScreenDistance ) );
		}
		// moving costly divides into consts
		const float ANGLE_STEP = PI2 * float( NUM_RINGS ) / float( NUM_SAMPLES );
		const float INV_NUM_SAMPLES = 1.0 / float( NUM_SAMPLES );
		float getAmbientOcclusion( const in vec3 centerViewPosition ) {
			// precompute some variables require in getOcclusion.
			scaleDividedByCameraFar = scale / cameraFar;
			minResolutionMultipliedByCameraFar = minResolution * cameraFar;
			vec3 centerViewNormal = getViewNormal( centerViewPosition, vUv );
			// jsfiddle that shows sample pattern: https://jsfiddle.net/a16ff1p7/
			float angle = rand( vUv + randomSeed ) * PI2;
			vec2 radius = vec2( kernelRadius * INV_NUM_SAMPLES ) / size;
			vec2 radiusStep = radius;
			float occlusionSum = 0.0;
			float weightSum = 0.0;
			for( int i = 0; i < NUM_SAMPLES; i ++ ) {
				vec2 sampleUv = vUv + vec2( cos( angle ), sin( angle ) ) * radius;
				radius += radiusStep;
				angle += ANGLE_STEP;
				float sampleDepth = getDepth( sampleUv );
				if( sampleDepth >= ( 1.0 - EPSILON ) ) {
					continue;
				}
				float sampleViewZ = getViewZ( sampleDepth );
				vec3 sampleViewPosition = getViewPosition( sampleUv, sampleDepth, sampleViewZ );
				occlusionSum += getOcclusion( centerViewPosition, centerViewNormal, sampleViewPosition );
				weightSum += 1.0;
			}
			if( weightSum == 0.0 ) discard;
			return occlusionSum * ( intensity / weightSum );
		}
		void main() {
			float centerDepth = getDepth( vUv );
			if( centerDepth >= ( 1.0 - EPSILON ) ) {
				discard;
			}
			float centerViewZ = getViewZ( centerDepth );
			vec3 viewPosition = getViewPosition( vUv, centerDepth, centerViewZ );
			float ambientOcclusion = getAmbientOcclusion( viewPosition );
			gl_FragColor = getDefaultColor( vUv );
			gl_FragColor.xyz *= 1.0 - ambientOcclusion;
		}`
}
Example #22
Source File: CSS2DRenderer.js From BlueMapWeb with MIT License | 4 votes |
/**
 * Renderer that positions CSS2DObject DOM elements over a 3D scene by
 * projecting each object's world position through the camera and applying a
 * CSS translate. Also z-orders elements by camera distance.
 * @param {Object} events optional event hooks forwarded to each CSS2DObject.
 */
CSS2DRenderer = function (events = null) {
var _this = this;
var _width, _height;
var _widthHalf, _heightHalf;
// Scratch objects reused every frame to avoid allocation.
var vector = new Vector3();
var viewMatrix = new Matrix4();
var viewProjectionMatrix = new Matrix4();
// Per-object render data (distance to camera), keyed weakly by object.
var cache = {
objects: new WeakMap()
};
var domElement = document.createElement( 'div' );
domElement.style.overflow = 'hidden';
this.domElement = domElement;
this.events = events;
this.getSize = function () {
return {
width: _width,
height: _height
};
};
// Resize the overlay container and cache the half extents used to map NDC
// coordinates to pixels.
this.setSize = function ( width, height ) {
_width = width;
_height = height;
_widthHalf = _width / 2;
_heightHalf = _height / 2;
domElement.style.width = width + 'px';
domElement.style.height = height + 'px';
};
// Recursively project CSS2DObjects and update their DOM transforms.
var renderObject = function ( object, scene, camera, parentVisible ) {
if ( object instanceof CSS2DObject ) {
object.events = _this.events;
object.onBeforeRender( _this, scene, camera );
// World position -> NDC via the combined view-projection matrix.
vector.setFromMatrixPosition( object.matrixWorld );
vector.applyMatrix4( viewProjectionMatrix );
var element = object.element;
var style = 'translate(' + ( vector.x * _widthHalf + _widthHalf - object.anchor.x) + 'px,' + ( - vector.y * _heightHalf + _heightHalf - object.anchor.y ) + 'px)';
// Vendor-prefixed transforms kept for legacy browser support.
element.style.WebkitTransform = style;
element.style.MozTransform = style;
element.style.oTransform = style;
element.style.transform = style;
// Hide when an ancestor is hidden, the object is hidden, or it projects
// outside the NDC depth range [-1, 1].
element.style.display = ( parentVisible && object.visible && vector.z >= - 1 && vector.z <= 1 && element.style.opacity !== "0" ) ? '' : 'none';
var objectData = {
distanceToCameraSquared: getDistanceToSquared( camera, object )
};
cache.objects.set( object, objectData );
if ( element.parentNode !== domElement ) {
domElement.appendChild( element );
}
object.onAfterRender( _this, scene, camera );
}
for ( var i = 0, l = object.children.length; i < l; i ++ ) {
renderObject( object.children[ i ], scene, camera, parentVisible && object.visible );
}
};
// Squared camera-to-object distance with closure-scoped scratch vectors.
var getDistanceToSquared = function () {
var a = new Vector3();
var b = new Vector3();
return function ( object1, object2 ) {
a.setFromMatrixPosition( object1.matrixWorld );
b.setFromMatrixPosition( object2.matrixWorld );
return a.distanceToSquared( b );
};
}();
// Collect every CSS2DObject in the scene graph into a flat list.
var filterAndFlatten = function ( scene ) {
var result = [];
scene.traverse( function ( object ) {
if ( object instanceof CSS2DObject ) result.push( object );
} );
return result;
};
// Assign z-indices so nearer objects stack above farther ones.
var zOrder = function ( scene ) {
var sorted = filterAndFlatten( scene ).sort( function ( a, b ) {
var distanceA = cache.objects.get( a ).distanceToCameraSquared;
var distanceB = cache.objects.get( b ).distanceToCameraSquared;
return distanceA - distanceB;
} );
var zMax = sorted.length;
for ( var i = 0, l = sorted.length; i < l; i ++ ) {
sorted[ i ].element.style.zIndex = zMax - i;
}
};
this.render = function ( scene, camera ) {
if ( scene.autoUpdate === true ) scene.updateMatrixWorld();
if ( camera.parent === null ) camera.updateMatrixWorld();
viewMatrix.copy( camera.matrixWorldInverse );
viewProjectionMatrix.multiplyMatrices( camera.projectionMatrix, viewMatrix );
renderObject( scene, scene, camera, true );
zOrder( scene );
};
}
Example #23
Source File: i3dmExample.js From 3DTilesRendererJS with Apache License 2.0 | 4 votes |
/**
 * Set up the i3dm example: renderer, camera, controls, lights, and an
 * I3DMLoader that loads a sample tileset and recenters the view on the
 * average instance position. Assigns the module-level globals
 * scene/renderer/camera/controls/dirLight.
 */
function init() {
scene = new Scene();
// primary camera view
renderer = new WebGLRenderer( { antialias: true } );
renderer.setPixelRatio( window.devicePixelRatio );
renderer.setSize( window.innerWidth, window.innerHeight );
renderer.setClearColor( 0x151c1f );
renderer.shadowMap.enabled = true;
renderer.shadowMap.type = PCFSoftShadowMap;
renderer.outputEncoding = sRGBEncoding;
document.body.appendChild( renderer.domElement );
camera = new PerspectiveCamera( 60, window.innerWidth / window.innerHeight, 1, 4000 );
camera.position.set( 100, 100, 100 );
// controls
controls = new OrbitControls( camera, renderer.domElement );
controls.screenSpacePanning = false;
controls.minDistance = 1;
controls.maxDistance = 2000;
// lights
dirLight = new DirectionalLight( 0xffffff, 1.25 );
dirLight.position.set( 1, 2, 3 ).multiplyScalar( 40 );
dirLight.castShadow = true;
dirLight.shadow.bias = - 0.01;
dirLight.shadow.mapSize.setScalar( 2048 );
// Size the shadow camera's orthographic frustum around the model.
const shadowCam = dirLight.shadow.camera;
shadowCam.left = - 200;
shadowCam.bottom = - 200;
shadowCam.right = 200;
shadowCam.top = 200;
shadowCam.updateProjectionMatrix();
scene.add( dirLight );
const ambLight = new AmbientLight( 0xffffff, 0.05 );
scene.add( ambLight );
new I3DMLoader()
.load( 'https://raw.githubusercontent.com/CesiumGS/3d-tiles-samples/main/1.0/TilesetWithTreeBillboards/tree.i3dm' )
.then( res => {
// Find the first InstancedMesh in the loaded scene, if any.
let instance = null;
res.scene.traverse( c => {
if ( ! instance && c.isInstancedMesh ) {
instance = c;
}
} );
if ( instance ) {
res.scene.updateMatrixWorld( true );
const pos = new Vector3();
const quat = new Quaternion();
const sca = new Vector3();
const mat = new Matrix4();
// Average the world-space positions of all instances and aim the
// controls/camera at that centroid.
const averagePos = new Vector3();
for ( let i = 0, l = instance.count; i < l; i ++ ) {
instance.getMatrixAt( i, mat );
mat.premultiply( instance.matrixWorld );
mat.decompose( pos, quat, sca );
averagePos.add( pos );
}
averagePos.divideScalar( instance.count );
controls.target.copy( averagePos );
camera.position.add( averagePos );
controls.update();
}
console.log( res );
scene.add( res.scene );
} );
onWindowResize();
window.addEventListener( 'resize', onWindowResize, false );
}
Example #24
Source File: SSRrShader.js From Computer-Graphics with MIT License | 4 votes |
// Screen-space refraction (SSRr) shader: for selected refractive surfaces it
// marches a refracted ray through the depth buffer in screen space and shows
// the scene color found at the hit point.
// NOTE(review): assigned without const/let — presumably declared/exported
// elsewhere in this module; confirm before reuse.
SSRrShader = {
// Compile-time switches; MAX_STEP is expected to be overwritten by the pass
// before the material is compiled (0 steps would march nothing).
defines: {
MAX_STEP: 0,
PERSPECTIVE_CAMERA: true,
SPECULAR: true,
FILL_HOLE: true,
INFINITE_THICK: false,
},
// Scene color/specular buffers, normal + depth buffers (full scene and
// "selected objects only" variants), refraction mask, camera parameters.
uniforms: {
'tDiffuse': { value: null },
'tSpecular': { value: null },
'tNormalSelects': { value: null },
'tRefractive': { value: null },
'tDepth': { value: null },
'tDepthSelects': { value: null },
'cameraNear': { value: null },
'cameraFar': { value: null },
'resolution': { value: new Vector2() },
'cameraProjectionMatrix': { value: new Matrix4() },
'cameraInverseProjectionMatrix': { value: new Matrix4() },
// index of refraction used by refract(); 1.0 disables the effect
'ior': { value: 1.03 },
'cameraRange': { value: 0 },
// maximum world-space march distance along the refracted ray
'maxDistance': { value: 180 },
// hit tolerance ("surface thickness") in view space
'surfDist': { value: .007 },
},
// pass-through vertex shader: forwards UVs only
vertexShader: /* glsl */`
varying vec2 vUv;
void main() {
vUv = uv;
gl_Position = projectionMatrix * modelViewMatrix * vec4( position, 1.0 );
}
`,
fragmentShader: /* glsl */`
// precision highp float;
precision highp sampler2D;
varying vec2 vUv;
uniform sampler2D tDepth;
uniform sampler2D tDepthSelects;
uniform sampler2D tNormalSelects;
uniform sampler2D tRefractive;
uniform sampler2D tDiffuse;
uniform sampler2D tSpecular;
uniform float cameraRange;
uniform vec2 resolution;
uniform float cameraNear;
uniform float cameraFar;
uniform float ior;
uniform mat4 cameraProjectionMatrix;
uniform mat4 cameraInverseProjectionMatrix;
uniform float maxDistance;
uniform float surfDist;
#include <packing>
float pointToLineDistance(vec3 x0, vec3 x1, vec3 x2) {
//x0: point, x1: linePointA, x2: linePointB
//https://mathworld.wolfram.com/Point-LineDistance3-Dimensional.html
return length(cross(x0-x1,x0-x2))/length(x2-x1);
}
float pointPlaneDistance(vec3 point,vec3 planePoint,vec3 planeNormal){
// https://mathworld.wolfram.com/Point-PlaneDistance.html
//// https://en.wikipedia.org/wiki/Plane_(geometry)
//// http://paulbourke.net/geometry/pointlineplane/
float a=planeNormal.x,b=planeNormal.y,c=planeNormal.z;
float x0=point.x,y0=point.y,z0=point.z;
float x=planePoint.x,y=planePoint.y,z=planePoint.z;
float d=-(a*x+b*y+c*z);
float distance=(a*x0+b*y0+c*z0+d)/sqrt(a*a+b*b+c*c);
return distance;
}
float getDepth( const in vec2 uv ) {
return texture2D( tDepth, uv ).x;
}
float getDepthSelects( const in vec2 uv ) {
return texture2D( tDepthSelects, uv ).x;
}
float getViewZ( const in float depth ) {
#ifdef PERSPECTIVE_CAMERA
return perspectiveDepthToViewZ( depth, cameraNear, cameraFar );
#else
return orthographicDepthToViewZ( depth, cameraNear, cameraFar );
#endif
}
vec3 getViewPosition( const in vec2 uv, const in float depth/*clip space*/, const in float clipW ) {
vec4 clipPosition = vec4( ( vec3( uv, depth ) - 0.5 ) * 2.0, 1.0 );//ndc
clipPosition *= clipW; //clip
return ( cameraInverseProjectionMatrix * clipPosition ).xyz;//view
}
vec3 getViewNormalSelects( const in vec2 uv ) {
return unpackRGBToNormal( texture2D( tNormalSelects, uv ).xyz );
}
vec2 viewPositionToXY(vec3 viewPosition){
vec2 xy;
vec4 clip=cameraProjectionMatrix*vec4(viewPosition,1);
xy=clip.xy;//clip
float clipW=clip.w;
xy/=clipW;//NDC
xy=(xy+1.)/2.;//uv
xy*=resolution;//screen
return xy;
}
void setResultColor(vec2 uv){
vec4 refractColor=texture2D(tDiffuse,uv);
#ifdef SPECULAR
vec4 specularColor=texture2D(tSpecular,vUv);
gl_FragColor.xyz=mix(refractColor.xyz,vec3(1),specularColor.r);
// gl_FragColor.xyz=refractColor.xyz*(1.+specularColor.r*3.);
#else
gl_FragColor.xyz=refractColor.xyz;
#endif
gl_FragColor.a=1.;
}
void main(){
if(ior==1.) return; // Adding this line may have better performance, but more importantly, it can avoid display errors at the very edges of the model when IOR is equal to 1.
float refractive=texture2D(tRefractive,vUv).r;
if(refractive<=0.) return;
// gl_FragColor=vec4(0,0,.5,1);return;
vec3 viewNormalSelects=getViewNormalSelects( vUv );
// gl_FragColor=vec4(viewNormalSelects,1);return;
// if(viewNormalSelects.x<=0.&&viewNormalSelects.y<=0.&&viewNormalSelects.z<=0.) return;
float depth = getDepthSelects( vUv );
float viewZ = getViewZ( depth );
// if(-viewZ>=cameraFar) return;
float clipW = cameraProjectionMatrix[2][3] * viewZ+cameraProjectionMatrix[3][3];
vec3 viewPosition=getViewPosition( vUv, depth, clipW );
vec2 d0=gl_FragCoord.xy;
vec2 d1;
#ifdef PERSPECTIVE_CAMERA
vec3 viewIncidentDir=normalize(viewPosition);
#else
vec3 viewIncidentDir=vec3(0,0,-1);
#endif
vec3 viewRefractDir=refract(viewIncidentDir,viewNormalSelects,1./ior);
// https://www.khronos.org/registry/OpenGL-Refpages/gl4/html/refract.xhtml
vec3 d1viewPosition=viewPosition+viewRefractDir*maxDistance;
#ifdef PERSPECTIVE_CAMERA
if(d1viewPosition.z>-cameraNear){
//https://tutorial.math.lamar.edu/Classes/CalcIII/EqnsOfLines.aspx
float t=(-cameraNear-viewPosition.z)/viewRefractDir.z;
d1viewPosition=viewPosition+viewRefractDir*t;
}
#endif
d1=viewPositionToXY(d1viewPosition);
float totalLen=length(d1-d0);
float xLen=d1.x-d0.x;
float yLen=d1.y-d0.y;
float totalStep=max(abs(xLen),abs(yLen));
float xSpan=xLen/totalStep;
float ySpan=yLen/totalStep;
#ifdef FILL_HOLE
bool isRough=false;
vec2 uvRough;
#endif
for(float i=0.;i<float(MAX_STEP);i++){
if(i>=totalStep) break;
vec2 xy=vec2(d0.x+i*xSpan,d0.y+i*ySpan);
if(xy.x<0.||xy.x>resolution.x||xy.y<0.||xy.y>resolution.y) break;
float s=length(xy-d0)/totalLen;
vec2 uv=xy/resolution;
float d = getDepth(uv);
float vZ = getViewZ( d );
float cW = cameraProjectionMatrix[2][3] * vZ+cameraProjectionMatrix[3][3];
vec3 vP=getViewPosition( uv, d, cW );
#ifdef PERSPECTIVE_CAMERA
// https://comp.nus.edu.sg/~lowkl/publications/lowk_persp_interp_techrep.pdf
float recipVPZ=1./viewPosition.z;
float viewRefractRayZ=1./(recipVPZ+s*(1./d1viewPosition.z-recipVPZ));
float sD=surfDist*cW;
#else
float viewRefractRayZ=viewPosition.z+s*(d1viewPosition.z-viewPosition.z);
float sD=surfDist;
#endif
#ifdef FILL_HOLE // TODO: May can improve performance by check if INFINITE_THICK too.
if(viewRefractRayZ<=vZ){
if(!isRough){
uvRough=uv;
isRough=true;
}
}
#endif
bool hit;
#ifdef INFINITE_THICK
hit=viewRefractRayZ<=vZ;
#else
if(viewRefractRayZ-sD>vZ) continue;
float away=pointToLineDistance(vP,viewPosition,d1viewPosition);
hit=away<=sD;
#endif
if(hit){
setResultColor(uv);
return;
}
}
#ifdef FILL_HOLE
if(isRough){
setResultColor(uvRough);
}
// else{
// gl_FragColor=texture2D(tDiffuse,vUv);//For afterward add color mix feature.
// }
#else
// gl_FragColor=texture2D(tDiffuse,vUv);//For afterward add color mix feature.
#endif
}
`
}
Example #25
Source File: SSAOEffect.js From three-viewer with MIT License | 4 votes |
/**
* Constructs a new SSAO effect.
*
* @param {Camera} camera - The main camera.
* @param {Texture} normalBuffer - A texture that contains the scene normals. See {@link NormalPass}.
* @param {Object} [options] - The options.
* @param {BlendFunction} [options.blendFunction=BlendFunction.MULTIPLY] - The blend function of this effect.
* @param {Number} [options.samples=11] - The amount of samples per pixel. Should not be a multiple of the ring count.
* @param {Number} [options.rings=4] - The amount of rings in the occlusion sampling pattern.
* @param {Number} [options.distanceThreshold=0.97] - A global distance threshold at which the occlusion effect starts to fade out. Range [0.0, 1.0].
* @param {Number} [options.distanceFalloff=0.03] - The distance falloff. Influences the smoothness of the overall occlusion cutoff. Range [0.0, 1.0].
* @param {Number} [options.rangeThreshold=0.0005] - A local occlusion range threshold at which the occlusion starts to fade out. Range [0.0, 1.0].
* @param {Number} [options.rangeFalloff=0.001] - The occlusion range falloff. Influences the smoothness of the proximity cutoff. Range [0.0, 1.0].
* @param {Number} [options.luminanceInfluence=0.7] - Determines how much the luminance of the scene influences the ambient occlusion.
* @param {Number} [options.radius=18.25] - The occlusion sampling radius.
* @param {Number} [options.scale=1.0] - The scale of the ambient occlusion.
* @param {Number} [options.bias=0.0] - An occlusion bias.
*/
constructor(camera, normalBuffer, {
blendFunction = BlendFunction.MULTIPLY,
samples = 11,
rings = 4,
distanceThreshold = 0.97,
distanceFalloff = 0.03,
rangeThreshold = 0.0005,
rangeFalloff = 0.001,
luminanceInfluence = 0.7,
radius = 18.25,
scale = 1.0,
bias = 0.0
} = {}) {
// The RINGS/SAMPLES defines start as placeholders; the samples/rings
// assignments further down replace them with the real values.
super("SSAOEffect", fragmentShader, {
blendFunction,
attributes: EffectAttribute.DEPTH,
defines: new Map([
["RINGS_INT", "0"],
["SAMPLES_INT", "0"],
["SAMPLES_FLOAT", "0.0"]
]),
uniforms: new Map([
["normalBuffer", new Uniform(normalBuffer)],
["cameraInverseProjectionMatrix", new Uniform(new Matrix4())],
["cameraProjectionMatrix", new Uniform(new Matrix4())],
["radiusStep", new Uniform(new Vector2())],
["distanceCutoff", new Uniform(new Vector2())],
["proximityCutoff", new Uniform(new Vector2())],
// random per-instance seed decorrelates the sampling pattern
["seed", new Uniform(Math.random())],
["luminanceInfluence", new Uniform(luminanceInfluence)],
["scale", new Uniform(scale)],
["bias", new Uniform(bias)]
])
});
/**
* The current sampling radius.
*
* @type {Number}
* @private
*/
this.r = 0.0;
/**
* The current resolution.
*
* @type {Vector2}
* @private
*/
this.resolution = new Vector2(1, 1);
/**
* The main camera.
*
* @type {Camera}
* @private
*/
this.camera = camera;
// NOTE(review): samples/rings/radius look like accessor properties that
// refresh the defines/uniforms set above — confirm in the class body.
this.samples = samples;
this.rings = rings;
this.radius = radius;
// derive the distance/proximity fade-out ranges from threshold + falloff
this.setDistanceCutoff(distanceThreshold, distanceFalloff);
this.setProximityCutoff(rangeThreshold, rangeFalloff);
}
Example #26
Source File: ionExample.js From 3DTilesRendererJS with Apache License 2.0 | 4 votes |
// Tears down the current tileset (if any) and constructs a new TilesRenderer,
// either from a raw tileset URL or from a Cesium ion asset id + access token.
// Reads/writes the module globals: hashUrl, params, tiles, offsetParent.
function reinstantiateTiles() {

	let url = hashUrl || '../data/tileset.json';

	if ( hashUrl ) {

		params.ionAssetId = isInt( hashUrl ) ? hashUrl : '';

	}

	// dispose of the previous tileset before replacing it
	if ( tiles ) {

		offsetParent.remove( tiles.group );
		tiles.dispose();

	}

	if ( params.ionAssetId ) {

		// resolve the actual tileset endpoint (and bearer token) via the ion REST API
		url = new URL( `https://api.cesium.com/v1/assets/${params.ionAssetId}/endpoint` );
		url.searchParams.append( 'access_token', params.ionAccessToken );

		fetch( url, { mode: 'cors' } )
			.then( ( res ) => {

				if ( res.ok ) {

					return res.json();

				} else {

					return Promise.reject( `${res.status} : ${res.statusText}` );

				}

			} )
			.then( ( json ) => {

				url = new URL( json.url );
				const version = url.searchParams.get( 'v' );

				tiles = new TilesRenderer( url.toString() );
				tiles.fetchOptions.headers = {};
				tiles.fetchOptions.headers.Authorization = `Bearer ${json.accessToken}`;

				// propagate the tileset version to every http(s) tile request
				tiles.preprocessURL = uri => {

					uri = new URL( uri );
					if ( /^http/.test( uri.protocol ) ) {

						uri.searchParams.append( 'v', version );

					}

					return uri.toString();

				};

				// once the root tileset is loaded, orient the globe so the tileset
				// surface points toward +Y and sits at the origin
				tiles.onLoadTileSet = () => {

					const box = new Box3();
					const sphere = new Sphere();
					const matrix = new Matrix4();

					let position;
					let distanceToEllipsoidCenter;

					if ( tiles.getOrientedBounds( box, matrix ) ) {

						position = new Vector3().setFromMatrixPosition( matrix );
						distanceToEllipsoidCenter = position.length();

					} else if ( tiles.getBoundingSphere( sphere ) ) {

						position = sphere.center.clone();
						distanceToEllipsoidCenter = position.length();

					} else {

						// Fix: previously `position` stayed undefined when neither
						// bounding volume was available and position.normalize()
						// below threw a TypeError. Leave the default orientation.
						return;

					}

					const surfaceDirection = position.normalize();
					const up = new Vector3( 0, 1, 0 );
					const rotationToNorthPole = rotationBetweenDirections( surfaceDirection, up );

					tiles.group.quaternion.x = rotationToNorthPole.x;
					tiles.group.quaternion.y = rotationToNorthPole.y;
					tiles.group.quaternion.z = rotationToNorthPole.z;
					tiles.group.quaternion.w = rotationToNorthPole.w;

					tiles.group.position.y = - distanceToEllipsoidCenter;

				};

				setupTiles();

			} )
			.catch( err => {

				console.error( 'Unable to get ion tileset:', err );

			} );

	} else {

		tiles = new TilesRenderer( url );
		setupTiles();

	}

}
Example #27
Source File: OutlinePass.js From Computer-Graphics with MIT License | 4 votes |
// Sets up the outline pass: mask/depth/blur/edge render targets (full and
// half resolution), the materials used for each stage, and scratch state.
constructor( resolution, scene, camera, selectedObjects ) {
super();
this.renderScene = scene;
this.renderCamera = camera;
this.selectedObjects = selectedObjects !== undefined ? selectedObjects : [];
this.visibleEdgeColor = new Color( 1, 1, 1 );
this.hiddenEdgeColor = new Color( 0.1, 0.04, 0.02 );
this.edgeGlow = 0.0;
this.usePatternTexture = false;
this.edgeThickness = 1.0;
this.edgeStrength = 3.0;
this.downSampleRatio = 2;
this.pulsePeriod = 0;
this._visibilityCache = new Map();
this.resolution = ( resolution !== undefined ) ? new Vector2( resolution.x, resolution.y ) : new Vector2( 256, 256 );
const pars = { minFilter: LinearFilter, magFilter: LinearFilter, format: RGBAFormat };
// blur/edge buffers run at a reduced resolution for performance
const resx = Math.round( this.resolution.x / this.downSampleRatio );
const resy = Math.round( this.resolution.y / this.downSampleRatio );
this.renderTargetMaskBuffer = new WebGLRenderTarget( this.resolution.x, this.resolution.y, pars );
this.renderTargetMaskBuffer.texture.name = 'OutlinePass.mask';
this.renderTargetMaskBuffer.texture.generateMipmaps = false;
// depth pre-pass material; RGBA packing keeps precision on all platforms
this.depthMaterial = new MeshDepthMaterial();
this.depthMaterial.side = DoubleSide;
this.depthMaterial.depthPacking = RGBADepthPacking;
this.depthMaterial.blending = NoBlending;
this.prepareMaskMaterial = this.getPrepareMaskMaterial();
this.prepareMaskMaterial.side = DoubleSide;
// patch the shader so depth unpacking matches the camera projection type
this.prepareMaskMaterial.fragmentShader = replaceDepthToViewZ( this.prepareMaskMaterial.fragmentShader, this.renderCamera );
this.renderTargetDepthBuffer = new WebGLRenderTarget( this.resolution.x, this.resolution.y, pars );
this.renderTargetDepthBuffer.texture.name = 'OutlinePass.depth';
this.renderTargetDepthBuffer.texture.generateMipmaps = false;
this.renderTargetMaskDownSampleBuffer = new WebGLRenderTarget( resx, resy, pars );
this.renderTargetMaskDownSampleBuffer.texture.name = 'OutlinePass.depthDownSample';
this.renderTargetMaskDownSampleBuffer.texture.generateMipmaps = false;
this.renderTargetBlurBuffer1 = new WebGLRenderTarget( resx, resy, pars );
this.renderTargetBlurBuffer1.texture.name = 'OutlinePass.blur1';
this.renderTargetBlurBuffer1.texture.generateMipmaps = false;
// second blur/edge pair runs at quarter resolution for the glow
this.renderTargetBlurBuffer2 = new WebGLRenderTarget( Math.round( resx / 2 ), Math.round( resy / 2 ), pars );
this.renderTargetBlurBuffer2.texture.name = 'OutlinePass.blur2';
this.renderTargetBlurBuffer2.texture.generateMipmaps = false;
this.edgeDetectionMaterial = this.getEdgeDetectionMaterial();
this.renderTargetEdgeBuffer1 = new WebGLRenderTarget( resx, resy, pars );
this.renderTargetEdgeBuffer1.texture.name = 'OutlinePass.edge1';
this.renderTargetEdgeBuffer1.texture.generateMipmaps = false;
this.renderTargetEdgeBuffer2 = new WebGLRenderTarget( Math.round( resx / 2 ), Math.round( resy / 2 ), pars );
this.renderTargetEdgeBuffer2.texture.name = 'OutlinePass.edge2';
this.renderTargetEdgeBuffer2.texture.generateMipmaps = false;
const MAX_EDGE_THICKNESS = 4;
const MAX_EDGE_GLOW = 4;
this.separableBlurMaterial1 = this.getSeperableBlurMaterial( MAX_EDGE_THICKNESS );
this.separableBlurMaterial1.uniforms[ 'texSize' ].value.set( resx, resy );
this.separableBlurMaterial1.uniforms[ 'kernelRadius' ].value = 1;
this.separableBlurMaterial2 = this.getSeperableBlurMaterial( MAX_EDGE_GLOW );
this.separableBlurMaterial2.uniforms[ 'texSize' ].value.set( Math.round( resx / 2 ), Math.round( resy / 2 ) );
this.separableBlurMaterial2.uniforms[ 'kernelRadius' ].value = MAX_EDGE_GLOW;
// Overlay material
this.overlayMaterial = this.getOverlayMaterial();
// copy material
if ( CopyShader === undefined ) console.error( 'THREE.OutlinePass relies on CopyShader' );
const copyShader = CopyShader;
this.copyUniforms = UniformsUtils.clone( copyShader.uniforms );
this.copyUniforms[ 'opacity' ].value = 1.0;
this.materialCopy = new ShaderMaterial( {
uniforms: this.copyUniforms,
vertexShader: copyShader.vertexShader,
fragmentShader: copyShader.fragmentShader,
blending: NoBlending,
depthTest: false,
depthWrite: false,
transparent: true
} );
this.enabled = true;
this.needsSwap = false;
// scratch state for saving/restoring renderer clear settings per frame
this._oldClearColor = new Color();
this.oldClearAlpha = 1;
this.fsQuad = new FullScreenQuad( null );
this.tempPulseColor1 = new Color();
this.tempPulseColor2 = new Color();
this.textureMatrix = new Matrix4();
// swap the depth-unpack helper for the camera's projection type
function replaceDepthToViewZ( string, camera ) {
const type = camera.isPerspectiveCamera ? 'perspective' : 'orthographic';
return string.replace( /DEPTH_TO_VIEW_Z/g, type + 'DepthToViewZ' );
}
}
Example #28
Source File: ColladaLoader.js From Computer-Graphics with MIT License | 4 votes |
parse( text, path ) {
// Collects the DIRECT children of `xml` whose nodeName matches `name`.
// Non-recursive replacement for xml.getElementsByTagName().
function getElementsByTagName( xml, name ) {

	const matches = [];
	const children = xml.childNodes;

	for ( let index = 0; index < children.length; index ++ ) {

		const node = children[ index ];
		if ( node.nodeName === name ) matches.push( node );

	}

	return matches;

}
// Splits a whitespace-separated COLLADA string list into an array of strings.
// Returns [] for empty input.
function parseStrings( text ) {

	if ( text.length === 0 ) return [];

	// split() already yields the final array — the element-by-element copy
	// loop in the original was redundant.
	return text.trim().split( /\s+/ );

}
// Splits a whitespace-separated COLLADA number list into an array of floats.
// Returns [] for empty input.
function parseFloats( text ) {

	if ( text.length === 0 ) return [];

	// map() replaces the original index-loop copy
	return text.trim().split( /\s+/ ).map( ( part ) => parseFloat( part ) );

}
// Splits a whitespace-separated COLLADA integer list into an array of ints.
// Returns [] for empty input.
function parseInts( text ) {

	if ( text.length === 0 ) return [];

	// explicit radix 10 — COLLADA integers are decimal; the original relied
	// on parseInt's implicit radix
	return text.trim().split( /\s+/ ).map( ( part ) => parseInt( part, 10 ) );

}
// Strips the leading '#' from a COLLADA URL fragment reference.
function parseId( text ) {

	return text.slice( 1 );

}
// Produces a fallback id for elements that omit the optional id attribute.
// Relies on the enclosing scope's `count` counter.
function generateId() {

	return `three_default_${ count ++ }`;

}
// Returns true when the object has no own enumerable keys.
function isEmpty( object ) {

	const keys = Object.keys( object );
	return keys.length === 0;

}
// asset
// Reads the <asset> element: scene unit scale and up-axis convention.
function parseAsset( xml ) {

	const unitElement = getElementsByTagName( xml, 'unit' )[ 0 ];
	const upAxisElement = getElementsByTagName( xml, 'up_axis' )[ 0 ];

	return {
		unit: parseAssetUnit( unitElement ),
		upAxis: parseAssetUpAxis( upAxisElement )
	};

}
// Returns the <unit meter="..."> scale factor; defaults to 1 meter when the
// element or its attribute is missing.
function parseAssetUnit( xml ) {

	if ( xml === undefined || xml.hasAttribute( 'meter' ) !== true ) return 1; // default 1 meter

	return parseFloat( xml.getAttribute( 'meter' ) );

}
// Returns the <up_axis> text content; defaults to 'Y_UP' when absent.
function parseAssetUpAxis( xml ) {

	if ( xml === undefined ) return 'Y_UP';

	return xml.textContent;

}
// library
// Runs `parser` over every <nodeName> element found inside the library
// element named `libraryName`; a missing library is silently skipped.
function parseLibrary( xml, libraryName, nodeName, parser ) {

	const library = getElementsByTagName( xml, libraryName )[ 0 ];

	if ( library === undefined ) return;

	for ( const element of getElementsByTagName( library, nodeName ) ) {

		parser( element );

	}

}
// Attaches a `build` result to every entry of a parsed-data dictionary.
function buildLibrary( data, builder ) {

	for ( const name of Object.keys( data ) ) {

		const entry = data[ name ];
		entry.build = builder( entry );

	}

}
// get
// Lazily builds (and memoizes) the runtime object for a parsed-data entry.
function getBuild( data, builder ) {

	if ( data.build === undefined ) {

		data.build = builder( data );

	}

	return data.build;

}
// animation
// Reads an <animation> element into sources/samplers/channels dictionaries.
// Child <animation> elements are parsed recursively; only leaf animations
// (those without child animations) are registered in the library.
function parseAnimation( xml ) {
const data = {
sources: {},
samplers: {},
channels: {}
};
let hasChildren = false;
for ( let i = 0, l = xml.childNodes.length; i < l; i ++ ) {
const child = xml.childNodes[ i ];
// skip text/comment nodes; only element nodes (nodeType 1) carry data
if ( child.nodeType !== 1 ) continue;
let id;
switch ( child.nodeName ) {
case 'source':
id = child.getAttribute( 'id' );
data.sources[ id ] = parseSource( child );
break;
case 'sampler':
id = child.getAttribute( 'id' );
data.samplers[ id ] = parseAnimationSampler( child );
break;
case 'channel':
// channels are keyed by their target, not an id
id = child.getAttribute( 'target' );
data.channels[ id ] = parseAnimationChannel( child );
break;
case 'animation':
// hierarchy of related animations
parseAnimation( child );
hasChildren = true;
break;
default:
console.log( child );
}
}
if ( hasChildren === false ) {
// since 'id' attributes can be optional, it's necessary to generate a UUID for unique assignment
library.animations[ xml.getAttribute( 'id' ) || MathUtils.generateUUID() ] = data;
}
}
// Reads a <sampler> element: maps each input semantic (INPUT, OUTPUT, ...) to
// the id of the source that provides its data.
function parseAnimationSampler( xml ) {

	const data = { inputs: {} };

	for ( let i = 0, l = xml.childNodes.length; i < l; i ++ ) {

		const child = xml.childNodes[ i ];

		// only element nodes are relevant
		if ( child.nodeType !== 1 ) continue;

		if ( child.nodeName === 'input' ) {

			const sourceId = parseId( child.getAttribute( 'source' ) );
			const semantic = child.getAttribute( 'semantic' );
			data.inputs[ semantic ] = sourceId;

		}

	}

	return data;

}
// Reads a <channel> element and decodes the SID addressing syntax of its
// target attribute: "nodeId/sid", "nodeId/sid.member" (member selection) or
// "nodeId/sid(i)(j)" (1D/2D array element selection).
function parseAnimationChannel( xml ) {

	const data = {};

	const target = xml.getAttribute( 'target' );

	// parsing SID Addressing Syntax
	const segments = target.split( '/' );
	const id = segments.shift();
	let sid = segments.shift();

	// check selection syntax
	const arraySyntax = sid.includes( '(' );
	const memberSyntax = sid.includes( '.' );

	if ( memberSyntax ) {

		// member selection access, e.g. "translate.X"
		const memberParts = sid.split( '.' );
		sid = memberParts.shift();
		data.member = memberParts.shift();

	} else if ( arraySyntax ) {

		// array-access syntax, used for fields of 1D vectors or 2D matrices
		const indexParts = sid.split( '(' );
		sid = indexParts.shift();

		data.indices = indexParts.map( ( part ) => parseInt( part.replace( /\)/, '' ) ) );

	}

	data.id = id;
	data.sid = sid;
	data.arraySyntax = arraySyntax;
	data.memberSyntax = memberSyntax;
	data.sampler = parseId( xml.getAttribute( 'source' ) );

	return data;

}
// Converts every channel of a parsed animation into three.js keyframe tracks.
function buildAnimation( data ) {

	const tracks = [];
	const { channels, samplers, sources } = data;

	for ( const target of Object.keys( channels ) ) {

		const channel = channels[ target ];
		const sampler = samplers[ channel.sampler ];

		// INPUT holds the key times, OUTPUT the animated values
		const inputSource = sources[ sampler.inputs.INPUT ];
		const outputSource = sources[ sampler.inputs.OUTPUT ];

		const animation = buildAnimationChannel( channel, inputSource, outputSource );
		createKeyframeTracks( animation, tracks );

	}

	return tracks;

}
// Returns the built (cached) keyframe tracks for the given animation id.
function getAnimation( id ) {

	const entry = library.animations[ id ];
	return getBuild( entry, buildAnimation );

}
// Converts one animation channel into a time-keyed map of raw values for the
// animated transform of the target node. Only 'matrix' transforms are
// implemented; translate/rotate/scale channels just emit a warning.
function buildAnimationChannel( channel, inputSource, outputSource ) {
const node = library.nodes[ channel.id ];
const object3D = getNode( node.id );
const transform = node.transforms[ channel.sid ];
// transpose: COLLADA stores matrices row-major, three.js column-major
const defaultMatrix = node.matrix.clone().transpose();
let time, stride;
let i, il, j, jl;
const data = {};
// the collada spec allows the animation of data in various ways.
// depending on the transform type (matrix, translate, rotate, scale), we execute different logic
switch ( transform ) {
case 'matrix':
for ( i = 0, il = inputSource.array.length; i < il; i ++ ) {
time = inputSource.array[ i ];
stride = i * outputSource.stride;
if ( data[ time ] === undefined ) data[ time ] = {};
if ( channel.arraySyntax === true ) {
// a single matrix element is animated, addressed as (row)(column)
const value = outputSource.array[ stride ];
const index = channel.indices[ 0 ] + 4 * channel.indices[ 1 ];
data[ time ][ index ] = value;
} else {
// the whole matrix is animated; copy one stride of values
for ( j = 0, jl = outputSource.stride; j < jl; j ++ ) {
data[ time ][ j ] = outputSource.array[ stride + j ];
}
}
}
break;
case 'translate':
console.warn( 'THREE.ColladaLoader: Animation transform type "%s" not yet implemented.', transform );
break;
case 'rotate':
console.warn( 'THREE.ColladaLoader: Animation transform type "%s" not yet implemented.', transform );
break;
case 'scale':
console.warn( 'THREE.ColladaLoader: Animation transform type "%s" not yet implemented.', transform );
break;
}
// fill gaps/defaults so each keyframe carries all 16 matrix components
const keyframes = prepareAnimationData( data, defaultMatrix );
const animation = {
name: object3D.uuid,
keyframes: keyframes
};
return animation;
}
// Turns the sparse per-time data map into a time-sorted keyframe list and
// fills missing matrix components from the node's default matrix.
function prepareAnimationData( data, defaultMatrix ) {

	// transfer data into a sortable array
	const keyframes = Object.keys( data ).map( ( time ) => ( {
		time: parseFloat( time ),
		value: data[ time ]
	} ) );

	// ensure keyframes are sorted by time
	keyframes.sort( ( a, b ) => a.time - b.time );

	// clean up all 16 matrix components so they can feed keyframe tracks
	for ( let i = 0; i < 16; i ++ ) {

		transformAnimationData( keyframes, i, defaultMatrix.elements[ i ] );

	}

	return keyframes;

}
// Scratch objects reused by createKeyframeTracks() below to avoid
// per-keyframe allocations during matrix decomposition.
const position = new Vector3();
const scale = new Vector3();
const quaternion = new Quaternion();
// Decomposes each keyframe matrix into TRS and appends position, quaternion
// and scale tracks (only for components that have data) to `tracks`.
// Uses the shared scratch objects matrix/position/quaternion/scale.
function createKeyframeTracks( animation, tracks ) {

	const { keyframes, name } = animation;

	const times = [];
	const positionData = [];
	const quaternionData = [];
	const scaleData = [];

	for ( const keyframe of keyframes ) {

		// row-major COLLADA data -> column-major three.js matrix
		matrix.fromArray( keyframe.value ).transpose();
		matrix.decompose( position, quaternion, scale );

		times.push( keyframe.time );
		positionData.push( position.x, position.y, position.z );
		quaternionData.push( quaternion.x, quaternion.y, quaternion.z, quaternion.w );
		scaleData.push( scale.x, scale.y, scale.z );

	}

	if ( positionData.length > 0 ) tracks.push( new VectorKeyframeTrack( name + '.position', times, positionData ) );
	if ( quaternionData.length > 0 ) tracks.push( new QuaternionKeyframeTrack( name + '.quaternion', times, quaternionData ) );
	if ( scaleData.length > 0 ) tracks.push( new VectorKeyframeTrack( name + '.scale', times, scaleData ) );

	return tracks;

}
// Ensures every keyframe has a value for `property`: missing entries are
// marked, then either defaulted (when no keyframe has data) or interpolated
// from their neighbours.
function transformAnimationData( keyframes, property, defaultValue ) {

	let empty = true;

	// check, if values of a property are missing in our keyframes
	for ( const keyframe of keyframes ) {

		if ( keyframe.value[ property ] === undefined ) {

			keyframe.value[ property ] = null; // mark as missing

		} else {

			empty = false;

		}

	}

	if ( empty === true ) {

		// no values at all, so we apply the default everywhere
		for ( const keyframe of keyframes ) {

			keyframe.value[ property ] = defaultValue;

		}

	} else {

		// filling gaps
		createMissingKeyframes( keyframes, property );

	}

}
// Fills null-marked property values: clamps to the nearest known value at the
// ends of the timeline and linearly interpolates in between.
function createMissingKeyframes( keyframes, property ) {

	for ( let i = 0, l = keyframes.length; i < l; i ++ ) {

		const keyframe = keyframes[ i ];

		if ( keyframe.value[ property ] !== null ) continue;

		const prev = getPrev( keyframes, i, property );
		const next = getNext( keyframes, i, property );

		if ( prev === null ) {

			// nothing earlier: clamp to the next known value
			keyframe.value[ property ] = next.value[ property ];

		} else if ( next === null ) {

			// nothing later: clamp to the previous known value
			keyframe.value[ property ] = prev.value[ property ];

		} else {

			interpolate( keyframe, prev, next, property );

		}

	}

}
// Scans backwards from index i for the nearest keyframe whose `property` has
// a concrete (non-null) value; returns null when none exists.
function getPrev( keyframes, i, property ) {

	for ( let j = i; j >= 0; j -- ) {

		const candidate = keyframes[ j ];
		if ( candidate.value[ property ] !== null ) return candidate;

	}

	return null;

}
// Scans forwards from index i for the nearest keyframe whose `property` has
// a concrete (non-null) value; returns null when none exists.
function getNext( keyframes, i, property ) {

	for ( let j = i; j < keyframes.length; j ++ ) {

		const candidate = keyframes[ j ];
		if ( candidate.value[ property ] !== null ) return candidate;

	}

	return null;

}
// Linearly interpolates `property` for `key` between the prev and next
// keyframes; coincident keyframe times fall back to the previous value.
function interpolate( key, prev, next, property ) {

	const span = next.time - prev.time;

	if ( span === 0 ) {

		key.value[ property ] = prev.value[ property ];
		return;

	}

	// same operation order as the classic lerp to keep results bit-identical
	key.value[ property ] = ( ( key.time - prev.time ) * ( next.value[ property ] - prev.value[ property ] ) / span ) + prev.value[ property ];

}
// animation clips
// Reads an <animation_clip>: name, time range and referenced animation ids;
// registers the result in the library keyed by the clip's id.
function parseAnimationClip( xml ) {

	const data = {
		name: xml.getAttribute( 'id' ) || 'default',
		start: parseFloat( xml.getAttribute( 'start' ) || 0 ),
		end: parseFloat( xml.getAttribute( 'end' ) || 0 ),
		animations: []
	};

	for ( let i = 0, l = xml.childNodes.length; i < l; i ++ ) {

		const child = xml.childNodes[ i ];

		if ( child.nodeType !== 1 ) continue;

		if ( child.nodeName === 'instance_animation' ) {

			data.animations.push( parseId( child.getAttribute( 'url' ) ) );

		}

	}

	library.clips[ xml.getAttribute( 'id' ) ] = data;

}
// Assembles an AnimationClip from the tracks of all referenced animations.
// Duration falls back to -1 (auto) when no explicit range is given.
function buildAnimationClip( data ) {

	const duration = ( data.end - data.start ) || - 1;

	const tracks = [];

	for ( const animationId of data.animations ) {

		tracks.push( ...getAnimation( animationId ) );

	}

	return new AnimationClip( data.name, duration, tracks );

}
// Returns the built (cached) AnimationClip for the given clip id.
function getAnimationClip( id ) {

	const entry = library.clips[ id ];
	return getBuild( entry, buildAnimationClip );

}
// controller
// Reads a <controller>: only <skin> is supported; <morph> is acknowledged
// with a warning. Registers the result in the library by controller id.
function parseController( xml ) {

	const data = {};

	for ( let i = 0, l = xml.childNodes.length; i < l; i ++ ) {

		const child = xml.childNodes[ i ];

		if ( child.nodeType !== 1 ) continue;

		if ( child.nodeName === 'skin' ) {

			// there is exactly one skin per controller
			data.id = parseId( child.getAttribute( 'source' ) );
			data.skin = parseSkin( child );

		} else if ( child.nodeName === 'morph' ) {

			data.id = parseId( child.getAttribute( 'source' ) );
			console.warn( 'THREE.ColladaLoader: Morph target animation not supported yet.' );

		}

	}

	library.controllers[ xml.getAttribute( 'id' ) ] = data;

}
// Reads a <skin>: bind shape matrix, data sources, joints and vertex weights.
function parseSkin( xml ) {

	const data = { sources: {} };

	for ( let i = 0, l = xml.childNodes.length; i < l; i ++ ) {

		const child = xml.childNodes[ i ];

		if ( child.nodeType !== 1 ) continue;

		if ( child.nodeName === 'bind_shape_matrix' ) {

			data.bindShapeMatrix = parseFloats( child.textContent );

		} else if ( child.nodeName === 'source' ) {

			const sourceId = child.getAttribute( 'id' );
			data.sources[ sourceId ] = parseSource( child );

		} else if ( child.nodeName === 'joints' ) {

			data.joints = parseJoints( child );

		} else if ( child.nodeName === 'vertex_weights' ) {

			data.vertexWeights = parseVertexWeights( child );

		}

	}

	return data;

}
// Reads a <joints> element: maps each input semantic (e.g. JOINT,
// INV_BIND_MATRIX) to its source id.
function parseJoints( xml ) {

	const data = { inputs: {} };

	for ( let i = 0, l = xml.childNodes.length; i < l; i ++ ) {

		const child = xml.childNodes[ i ];

		if ( child.nodeType !== 1 ) continue;

		if ( child.nodeName === 'input' ) {

			const semantic = child.getAttribute( 'semantic' );
			data.inputs[ semantic ] = parseId( child.getAttribute( 'source' ) );

		}

	}

	return data;

}
// Reads <vertex_weights>: per-semantic inputs (with offsets), the per-vertex
// influence counts (vcount) and the flattened joint/weight index list (v).
function parseVertexWeights( xml ) {

	const data = { inputs: {} };

	for ( let i = 0, l = xml.childNodes.length; i < l; i ++ ) {

		const child = xml.childNodes[ i ];

		if ( child.nodeType !== 1 ) continue;

		if ( child.nodeName === 'input' ) {

			const semantic = child.getAttribute( 'semantic' );
			const id = parseId( child.getAttribute( 'source' ) );
			const offset = parseInt( child.getAttribute( 'offset' ) );
			data.inputs[ semantic ] = { id: id, offset: offset };

		} else if ( child.nodeName === 'vcount' ) {

			data.vcount = parseInts( child.textContent );

		} else if ( child.nodeName === 'v' ) {

			data.v = parseInts( child.textContent );

		}

	}

	return data;

}
// Builds a controller; when a skin is present, its indices/weights are
// injected into the corresponding geometry's sources for later building.
function buildController( data ) {

	const result = { id: data.id };
	const geometry = library.geometries[ result.id ];

	if ( data.skin === undefined ) return result;

	result.skin = buildSkin( data.skin );

	// expose the skin data through the geometry's sources
	geometry.sources.skinIndices = result.skin.indices;
	geometry.sources.skinWeights = result.skin.weights;

	return result;

}
// Builds per-vertex skin indices/weights (capped at BONE_LIMIT influences,
// strongest first), the bind shape matrix, and the joint list with inverse
// bind matrices.
function buildSkin( data ) {
const BONE_LIMIT = 4;
const build = {
joints: [], // this must be an array to preserve the joint order
indices: {
array: [],
stride: BONE_LIMIT
},
weights: {
array: [],
stride: BONE_LIMIT
}
};
const sources = data.sources;
const vertexWeights = data.vertexWeights;
const vcount = vertexWeights.vcount;
const v = vertexWeights.v;
const jointOffset = vertexWeights.inputs.JOINT.offset;
const weightOffset = vertexWeights.inputs.WEIGHT.offset;
const jointSource = data.sources[ data.joints.inputs.JOINT ];
const inverseSource = data.sources[ data.joints.inputs.INV_BIND_MATRIX ];
const weights = sources[ vertexWeights.inputs.WEIGHT.id ].array;
let stride = 0;
let i, j, l;
// process skin data for each vertex
for ( i = 0, l = vcount.length; i < l; i ++ ) {
const jointCount = vcount[ i ]; // this is the amount of joints that affect a single vertex
const vertexSkinData = [];
for ( j = 0; j < jointCount; j ++ ) {
const skinIndex = v[ stride + jointOffset ];
const weightId = v[ stride + weightOffset ];
const skinWeight = weights[ weightId ];
vertexSkinData.push( { index: skinIndex, weight: skinWeight } );
// NOTE(review): assumes exactly two inputs (JOINT + WEIGHT) per influence
stride += 2;
}
// we sort the joints in descending order based on the weights.
// this ensures we only process the most important joints of the vertex
vertexSkinData.sort( descending );
// now we provide for each vertex a set of four index and weight values.
// the order of the skin data matches the order of vertices
for ( j = 0; j < BONE_LIMIT; j ++ ) {
const d = vertexSkinData[ j ];
if ( d !== undefined ) {
build.indices.array.push( d.index );
build.weights.array.push( d.weight );
} else {
// pad vertices that have fewer than BONE_LIMIT influences
build.indices.array.push( 0 );
build.weights.array.push( 0 );
}
}
}
// setup bind matrix (transpose: COLLADA is row-major, three.js column-major)
if ( data.bindShapeMatrix ) {
build.bindMatrix = new Matrix4().fromArray( data.bindShapeMatrix ).transpose();
} else {
build.bindMatrix = new Matrix4().identity();
}
// process bones and inverse bind matrix data
for ( i = 0, l = jointSource.array.length; i < l; i ++ ) {
const name = jointSource.array[ i ];
const boneInverse = new Matrix4().fromArray( inverseSource.array, i * inverseSource.stride ).transpose();
build.joints.push( { name: name, boneInverse: boneInverse } );
}
return build;
// array sort function
function descending( a, b ) {
return b.weight - a.weight;
}
}
// Returns the (lazily built, cached) controller for the given id.
function getController( id ) {

	return getBuild( library.controllers[ id ], buildController );

}
// image
// Parses an <image> element; an image is fully described by the file
// path in its <init_from> child.
function parseImage( xml ) {

	const initFrom = getElementsByTagName( xml, 'init_from' )[ 0 ];

	library.images[ xml.getAttribute( 'id' ) ] = {
		init_from: initFrom.textContent
	};

}
// The build of an image is simply its file path; reuse a cached build if present.
function buildImage( data ) {

	return data.build !== undefined ? data.build : data.init_from;

}
// Returns the built image (its file path) for the given id, or null with a
// warning when the id is unknown.
function getImage( id ) {

	const data = library.images[ id ];

	if ( data === undefined ) {

		console.warn( 'THREE.ColladaLoader: Couldn\'t find image with ID:', id );
		return null;

	}

	return getBuild( data, buildImage );

}
// effect
// Parses an <effect> element; only the COMMON profile is supported.
function parseEffect( xml ) {

	const data = {};

	for ( const child of xml.childNodes ) {

		if ( child.nodeType !== 1 ) continue;

		if ( child.nodeName === 'profile_COMMON' ) {

			data.profile = parseEffectProfileCOMMON( child );

		}

	}

	library.effects[ xml.getAttribute( 'id' ) ] = data;

}
// Parses a <profile_COMMON> element: collects surfaces and samplers
// (declared via <newparam>), the shading technique and optional extras.
function parseEffectProfileCOMMON( xml ) {

	const data = {
		surfaces: {},
		samplers: {}
	};

	for ( const child of xml.childNodes ) {

		if ( child.nodeType !== 1 ) continue;

		if ( child.nodeName === 'newparam' ) {

			parseEffectNewparam( child, data );

		} else if ( child.nodeName === 'technique' ) {

			data.technique = parseEffectTechnique( child );

		} else if ( child.nodeName === 'extra' ) {

			data.extra = parseEffectExtra( child );

		}

	}

	return data;

}
// Parses a <newparam> element and registers the declared surface or
// sampler under its sid on the given profile data.
function parseEffectNewparam( xml, data ) {

	const sid = xml.getAttribute( 'sid' );

	for ( const child of xml.childNodes ) {

		if ( child.nodeType !== 1 ) continue;

		if ( child.nodeName === 'surface' ) {

			data.surfaces[ sid ] = parseEffectSurface( child );

		} else if ( child.nodeName === 'sampler2D' ) {

			data.samplers[ sid ] = parseEffectSampler( child );

		}

	}

}
// Parses a <surface> element; a surface only carries the id of the image
// it samples (its <init_from> child).
function parseEffectSurface( xml ) {

	const data = {};

	for ( const child of xml.childNodes ) {

		if ( child.nodeType !== 1 ) continue;

		if ( child.nodeName === 'init_from' ) data.init_from = child.textContent;

	}

	return data;

}
// Parses a <sampler2D> element; only the <source> reference (the sid of
// the sampled surface) is extracted.
function parseEffectSampler( xml ) {

	const data = {};

	for ( const child of xml.childNodes ) {

		if ( child.nodeType !== 1 ) continue;

		if ( child.nodeName === 'source' ) data.source = child.textContent;

	}

	return data;

}
// Parses a <technique> element: the shading model (constant/lambert/
// blinn/phong), its parameters and optional extras.
function parseEffectTechnique( xml ) {

	const data = {};
	const shadingModels = [ 'constant', 'lambert', 'blinn', 'phong' ];

	for ( const child of xml.childNodes ) {

		if ( child.nodeType !== 1 ) continue;

		if ( shadingModels.indexOf( child.nodeName ) !== - 1 ) {

			data.type = child.nodeName;
			data.parameters = parseEffectParameters( child );

		} else if ( child.nodeName === 'extra' ) {

			data.extra = parseEffectExtra( child );

		}

	}

	return data;

}
// Parses the shading parameters of a technique (emission, diffuse, ...).
// <transparent> additionally carries an 'opaque' mode, defaulting to A_ONE.
function parseEffectParameters( xml ) {

	const data = {};
	const simpleParams = [ 'emission', 'diffuse', 'specular', 'bump', 'ambient', 'shininess', 'transparency' ];

	for ( const child of xml.childNodes ) {

		if ( child.nodeType !== 1 ) continue;

		const name = child.nodeName;

		if ( simpleParams.indexOf( name ) !== - 1 ) {

			data[ name ] = parseEffectParameter( child );

		} else if ( name === 'transparent' ) {

			data[ name ] = {
				opaque: child.hasAttribute( 'opaque' ) ? child.getAttribute( 'opaque' ) : 'A_ONE',
				data: parseEffectParameter( child )
			};

		}

	}

	return data;

}
// Parses a single shading parameter: a color, a scalar float or a texture
// reference (with its optional <extra> technique settings).
function parseEffectParameter( xml ) {

	const data = {};

	for ( const child of xml.childNodes ) {

		if ( child.nodeType !== 1 ) continue;

		const name = child.nodeName;

		if ( name === 'color' ) {

			data[ name ] = parseFloats( child.textContent );

		} else if ( name === 'float' ) {

			data[ name ] = parseFloat( child.textContent );

		} else if ( name === 'texture' ) {

			data[ name ] = { id: child.getAttribute( 'texture' ), extra: parseEffectParameterTexture( child ) };

		}

	}

	return data;

}
// Parses the <extra> data attached to a texture reference; the result
// always exposes a 'technique' object with wrap/offset/repeat settings.
function parseEffectParameterTexture( xml ) {

	const data = {
		technique: {}
	};

	for ( const child of xml.childNodes ) {

		if ( child.nodeType !== 1 ) continue;

		if ( child.nodeName === 'extra' ) parseEffectParameterTextureExtra( child, data );

	}

	return data;

}
// Processes the <technique> children of a texture's <extra> element.
function parseEffectParameterTextureExtra( xml, data ) {

	for ( const child of xml.childNodes ) {

		if ( child.nodeType !== 1 ) continue;

		if ( child.nodeName === 'technique' ) parseEffectParameterTextureExtraTechnique( child, data );

	}

}
// Extracts texture transform settings (repeat/offset/wrap) from a texture's
// <extra><technique> element into data.technique, plus an optional bump map.
function parseEffectParameterTextureExtraTechnique( xml, data ) {

	for ( let i = 0, l = xml.childNodes.length; i < l; i ++ ) {

		const child = xml.childNodes[ i ];

		if ( child.nodeType !== 1 ) continue;

		switch ( child.nodeName ) {

			case 'repeatU':
			case 'repeatV':
			case 'offsetU':
			case 'offsetV':
				data.technique[ child.nodeName ] = parseFloat( child.textContent );
				break;

			case 'wrapU':
			case 'wrapV':

				// some files have TRUE/FALSE values for wrapU/wrapV which would
				// become NaN via parseInt, so handle them explicitly
				if ( child.textContent.toUpperCase() === 'TRUE' ) {

					data.technique[ child.nodeName ] = 1;

				} else if ( child.textContent.toUpperCase() === 'FALSE' ) {

					data.technique[ child.nodeName ] = 0;

				} else {

					// pass the radix explicitly so values can never be
					// misinterpreted (e.g. as hex via a leading '0x')
					data.technique[ child.nodeName ] = parseInt( child.textContent, 10 );

				}

				break;

			case 'bump':
				data[ child.nodeName ] = parseEffectExtraTechniqueBump( child );
				break;

		}

	}

}
// Parses an effect's <extra> element; only the <technique> child is relevant.
function parseEffectExtra( xml ) {

	const data = {};

	for ( const child of xml.childNodes ) {

		if ( child.nodeType !== 1 ) continue;

		if ( child.nodeName === 'technique' ) data.technique = parseEffectExtraTechnique( child );

	}

	return data;

}
// Parses vendor-specific effect extensions: double-sided rendering and bump maps.
function parseEffectExtraTechnique( xml ) {

	const data = {};

	for ( const child of xml.childNodes ) {

		if ( child.nodeType !== 1 ) continue;

		if ( child.nodeName === 'double_sided' ) {

			data[ child.nodeName ] = parseInt( child.textContent );

		} else if ( child.nodeName === 'bump' ) {

			data[ child.nodeName ] = parseEffectExtraTechniqueBump( child );

		}

	}

	return data;

}
// Parses a <bump> element: the referenced texture id, its texcoord set
// and any extra texture settings.
function parseEffectExtraTechniqueBump( xml ) {

	const data = {};

	for ( const child of xml.childNodes ) {

		if ( child.nodeType !== 1 ) continue;

		if ( child.nodeName === 'texture' ) {

			data[ child.nodeName ] = {
				id: child.getAttribute( 'texture' ),
				texcoord: child.getAttribute( 'texcoord' ),
				extra: parseEffectParameterTexture( child )
			};

		}

	}

	return data;

}
// Effects need no post-processing; the parsed data is the build.
function buildEffect( data ) {

	return data;

}
// Returns the (lazily built, cached) effect for the given id.
function getEffect( id ) {

	return getBuild( library.effects[ id ], buildEffect );

}
// material
// Parses a <material> element; the referenced effect holds the actual shading data.
function parseMaterial( xml ) {

	const data = {
		name: xml.getAttribute( 'name' )
	};

	for ( const child of xml.childNodes ) {

		if ( child.nodeType !== 1 ) continue;

		if ( child.nodeName === 'instance_effect' ) data.url = parseId( child.getAttribute( 'url' ) );

	}

	library.materials[ xml.getAttribute( 'id' ) ] = data;

}
// Picks a texture loader based on the image's file extension: TGA files
// need a dedicated loader, everything else goes through the default one.
function getTextureLoader( image ) {

	// lower-cased extension; '' when there is no '.' past the first character
	const dot = image.lastIndexOf( '.' );
	const extension = dot > 0 ? image.slice( dot + 1 ).toLowerCase() : '';

	return extension === 'tga' ? tgaLoader : textureLoader;

}
// Builds a three.js material from parsed <material> data by resolving the
// referenced effect's COMMON profile: the shading model selects the material
// class, the technique parameters fill in colors, maps and transparency.
function buildMaterial( data ) {

	const effect = getEffect( data.url );
	const technique = effect.profile.technique;

	let material;

	// map COLLADA shading models onto three.js material classes
	switch ( technique.type ) {

		case 'phong':
		case 'blinn':
			material = new MeshPhongMaterial();
			break;

		case 'lambert':
			material = new MeshLambertMaterial();
			break;

		default:
			material = new MeshBasicMaterial();
			break;

	}

	material.name = data.name || '';

	// Resolves a parsed texture reference to a loaded three.js texture,
	// applying wrap/offset/repeat settings from the texture's extra
	// technique when present. Returns null when no image can be found.
	function getTexture( textureObject, encoding = null ) {

		const sampler = effect.profile.samplers[ textureObject.id ];
		let image = null;

		// get image via sampler -> surface indirection when possible
		if ( sampler !== undefined ) {

			const surface = effect.profile.surfaces[ sampler.source ];
			image = getImage( surface.init_from );

		} else {

			console.warn( 'THREE.ColladaLoader: Undefined sampler. Access image directly (see #12530).' );
			image = getImage( textureObject.id );

		}

		// create texture if image is available
		if ( image !== null ) {

			const loader = getTextureLoader( image );

			if ( loader !== undefined ) {

				const texture = loader.load( image );

				const extra = textureObject.extra;

				if ( extra !== undefined && extra.technique !== undefined && isEmpty( extra.technique ) === false ) {

					// note: this 'technique' intentionally shadows the outer effect technique
					const technique = extra.technique;

					texture.wrapS = technique.wrapU ? RepeatWrapping : ClampToEdgeWrapping;
					texture.wrapT = technique.wrapV ? RepeatWrapping : ClampToEdgeWrapping;

					texture.offset.set( technique.offsetU || 0, technique.offsetV || 0 );
					texture.repeat.set( technique.repeatU || 1, technique.repeatV || 1 );

				} else {

					texture.wrapS = RepeatWrapping;
					texture.wrapT = RepeatWrapping;

				}

				if ( encoding !== null ) {

					texture.encoding = encoding;

				}

				return texture;

			} else {

				console.warn( 'THREE.ColladaLoader: Loader for texture %s not found.', image );

				return null;

			}

		} else {

			console.warn( 'THREE.ColladaLoader: Couldn\'t create texture with ID:', textureObject.id );

			return null;

		}

	}

	const parameters = technique.parameters;

	// transfer the shading parameters onto the material; guards like
	// 'material.specular' skip properties the chosen class doesn't have
	for ( const key in parameters ) {

		const parameter = parameters[ key ];

		switch ( key ) {

			case 'diffuse':
				if ( parameter.color ) material.color.fromArray( parameter.color );
				if ( parameter.texture ) material.map = getTexture( parameter.texture, sRGBEncoding );
				break;
			case 'specular':
				if ( parameter.color && material.specular ) material.specular.fromArray( parameter.color );
				if ( parameter.texture ) material.specularMap = getTexture( parameter.texture );
				break;
			case 'bump':
				if ( parameter.texture ) material.normalMap = getTexture( parameter.texture );
				break;
			case 'ambient':
				if ( parameter.texture ) material.lightMap = getTexture( parameter.texture, sRGBEncoding );
				break;
			case 'shininess':
				if ( parameter.float && material.shininess ) material.shininess = parameter.float;
				break;
			case 'emission':
				if ( parameter.color && material.emissive ) material.emissive.fromArray( parameter.color );
				if ( parameter.texture ) material.emissiveMap = getTexture( parameter.texture, sRGBEncoding );
				break;

		}

	}

	// COLLADA colors are sRGB; convert to three.js' linear working space
	material.color.convertSRGBToLinear();
	if ( material.specular ) material.specular.convertSRGBToLinear();
	if ( material.emissive ) material.emissive.convertSRGBToLinear();

	//

	let transparent = parameters[ 'transparent' ];
	let transparency = parameters[ 'transparency' ];

	// <transparency> does not exist but <transparent>
	if ( transparency === undefined && transparent ) {

		transparency = {
			float: 1
		};

	}

	// <transparent> does not exist but <transparency>
	if ( transparent === undefined && transparency ) {

		transparent = {
			opaque: 'A_ONE',
			data: {
				color: [ 1, 1, 1, 1 ]
			} };

	}

	if ( transparent && transparency ) {

		// handle case if a texture exists but no color
		if ( transparent.data.texture ) {

			// we do not set an alpha map (see #13792)
			material.transparent = true;

		} else {

			const color = transparent.data.color;

			// the opaque mode defines how color/alpha map to opacity
			switch ( transparent.opaque ) {

				case 'A_ONE':
					material.opacity = color[ 3 ] * transparency.float;
					break;
				case 'RGB_ZERO':
					material.opacity = 1 - ( color[ 0 ] * transparency.float );
					break;
				case 'A_ZERO':
					material.opacity = 1 - ( color[ 3 ] * transparency.float );
					break;
				case 'RGB_ONE':
					material.opacity = color[ 0 ] * transparency.float;
					break;
				default:
					console.warn( 'THREE.ColladaLoader: Invalid opaque type "%s" of transparent tag.', transparent.opaque );

			}

			if ( material.opacity < 1 ) material.transparent = true;

		}

	}

	//

	// vendor-specific extras: double-sided rendering and bump maps
	if ( technique.extra !== undefined && technique.extra.technique !== undefined ) {

		const techniques = technique.extra.technique;

		for ( const k in techniques ) {

			const v = techniques[ k ];

			switch ( k ) {

				case 'double_sided':
					material.side = ( v === 1 ? DoubleSide : FrontSide );
					break;

				case 'bump':
					material.normalMap = getTexture( v.texture );
					material.normalScale = new Vector2( 1, 1 );
					break;

			}

		}

	}

	return material;

}
// Returns the (lazily built, cached) material for the given id.
function getMaterial( id ) {

	return getBuild( library.materials[ id ], buildMaterial );

}
// camera
// Parses a <camera> element; the projection setup lives in its <optics> child.
function parseCamera( xml ) {

	const data = {
		name: xml.getAttribute( 'name' )
	};

	for ( const child of xml.childNodes ) {

		if ( child.nodeType !== 1 ) continue;

		if ( child.nodeName === 'optics' ) data.optics = parseCameraOptics( child );

	}

	library.cameras[ xml.getAttribute( 'id' ) ] = data;

}
// Parses a camera's <optics> element; only <technique_common> is supported,
// and an empty object is returned when it is missing.
function parseCameraOptics( xml ) {

	for ( const child of xml.childNodes ) {

		if ( child.nodeName === 'technique_common' ) {

			return parseCameraTechnique( child );

		}

	}

	return {};

}
// Parses the projection type (perspective or orthographic) and its parameters.
function parseCameraTechnique( xml ) {

	const data = {};

	for ( const child of xml.childNodes ) {

		if ( child.nodeName === 'perspective' || child.nodeName === 'orthographic' ) {

			data.technique = child.nodeName;
			data.parameters = parseCameraParameters( child );

		}

	}

	return data;

}
// Extracts the scalar optics parameters of a projection element;
// unknown child elements are ignored.
function parseCameraParameters( xml ) {

	const data = {};
	const names = [ 'xfov', 'yfov', 'xmag', 'ymag', 'znear', 'zfar', 'aspect_ratio' ];

	for ( const child of xml.childNodes ) {

		if ( names.indexOf( child.nodeName ) !== - 1 ) {

			data[ child.nodeName ] = parseFloat( child.textContent );

		}

	}

	return data;

}
// Builds a three.js camera from parsed optics data; falls back to a
// default PerspectiveCamera for unknown projection techniques.
function buildCamera( data ) {

	const parameters = data.optics.parameters;

	let camera;

	switch ( data.optics.technique ) {

		case 'perspective':

			camera = new PerspectiveCamera(
				parameters.yfov,
				parameters.aspect_ratio,
				parameters.znear,
				parameters.zfar
			);
			break;

		case 'orthographic': {

			const aspectRatio = parameters.aspect_ratio;

			// derive a missing magnification from the other one via the aspect ratio
			let xmag = parameters.xmag;
			let ymag = parameters.ymag;

			xmag = ( xmag === undefined ) ? ( ymag * aspectRatio ) : xmag;
			ymag = ( ymag === undefined ) ? ( xmag / aspectRatio ) : ymag;

			// COLLADA magnification is the full extent; three.js expects half extents
			xmag *= 0.5;
			ymag *= 0.5;

			camera = new OrthographicCamera(
				- xmag, xmag, ymag, - ymag, // left, right, top, bottom
				parameters.znear,
				parameters.zfar
			);
			break;

		}

		default:
			camera = new PerspectiveCamera();
			break;

	}

	camera.name = data.name || '';

	return camera;

}
// Returns the built camera for the given id, or null with a warning
// when the id is unknown.
function getCamera( id ) {

	const data = library.cameras[ id ];

	if ( data === undefined ) {

		console.warn( 'THREE.ColladaLoader: Couldn\'t find camera with ID:', id );
		return null;

	}

	return getBuild( data, buildCamera );

}
// light
// Parses a <light> element; only <technique_common> is supported
// (the last one found wins).
function parseLight( xml ) {

	let data = {};

	for ( const child of xml.childNodes ) {

		if ( child.nodeType !== 1 ) continue;

		if ( child.nodeName === 'technique_common' ) data = parseLightTechnique( child );

	}

	library.lights[ xml.getAttribute( 'id' ) ] = data;

}
// Parses the light type (directional/point/spot/ambient) and its parameters;
// the element name itself is the technique.
function parseLightTechnique( xml ) {

	const data = {};
	const lightTypes = [ 'directional', 'point', 'spot', 'ambient' ];

	for ( const child of xml.childNodes ) {

		if ( child.nodeType !== 1 ) continue;

		if ( lightTypes.indexOf( child.nodeName ) !== - 1 ) {

			data.technique = child.nodeName;
			data.parameters = parseLightParameters( child );

		}

	}

	return data;

}
// Extracts light parameters: color (converted from sRGB to linear),
// falloff angle and a distance derived from the quadratic attenuation.
function parseLightParameters( xml ) {

	const data = {};

	for ( const child of xml.childNodes ) {

		if ( child.nodeType !== 1 ) continue;

		if ( child.nodeName === 'color' ) {

			const rgb = parseFloats( child.textContent );
			data.color = new Color().fromArray( rgb ).convertSRGBToLinear();

		} else if ( child.nodeName === 'falloff_angle' ) {

			data.falloffAngle = parseFloat( child.textContent );

		} else if ( child.nodeName === 'quadratic_attenuation' ) {

			// approximate a three.js light distance from the attenuation factor
			const attenuation = parseFloat( child.textContent );
			data.distance = attenuation ? Math.sqrt( 1 / attenuation ) : 0;

		}

	}

	return data;

}
// Builds a three.js light from parsed light data.
// Returns null (with a warning) for unknown/missing techniques instead of
// crashing below with a TypeError on an undefined light; also tolerates
// missing parameters (parseLight stores {} when <technique_common> is absent).
function buildLight( data ) {

	let light;

	switch ( data.technique ) {

		case 'directional':
			light = new DirectionalLight();
			break;

		case 'point':
			light = new PointLight();
			break;

		case 'spot':
			light = new SpotLight();
			break;

		case 'ambient':
			light = new AmbientLight();
			break;

		default:
			console.warn( 'THREE.ColladaLoader: Unsupported light technique:', data.technique );
			return null;

	}

	const parameters = data.parameters || {};

	if ( parameters.color ) light.color.copy( parameters.color );
	if ( parameters.distance ) light.distance = parameters.distance;

	return light;

}
// Returns the built light for the given id, or null with a warning
// when the id is unknown.
function getLight( id ) {

	const data = library.lights[ id ];

	if ( data === undefined ) {

		console.warn( 'THREE.ColladaLoader: Couldn\'t find light with ID:', id );
		return null;

	}

	return getBuild( data, buildLight );

}
// geometry
// Parses a <geometry> element: its data sources, vertex inputs and
// primitive definitions. Only <mesh> geometries are supported.
function parseGeometry( xml ) {

	const data = {
		name: xml.getAttribute( 'name' ),
		sources: {},
		vertices: {},
		primitives: []
	};

	const mesh = getElementsByTagName( xml, 'mesh' )[ 0 ];

	// the following tags inside geometry are not supported yet (see https://github.com/mrdoob/three.js/pull/12606): convex_mesh, spline, brep
	if ( mesh === undefined ) return;

	for ( const child of mesh.childNodes ) {

		if ( child.nodeType !== 1 ) continue;

		switch ( child.nodeName ) {

			case 'source':
				data.sources[ child.getAttribute( 'id' ) ] = parseSource( child );
				break;

			case 'vertices':
				// data.sources[ id ] = data.sources[ parseId( getElementsByTagName( child, 'input' )[ 0 ].getAttribute( 'source' ) ) ];
				data.vertices = parseGeometryVertices( child );
				break;

			case 'polygons':
				console.warn( 'THREE.ColladaLoader: Unsupported primitive type: ', child.nodeName );
				break;

			case 'lines':
			case 'linestrips':
			case 'polylist':
			case 'triangles':
				data.primitives.push( parseGeometryPrimitive( child ) );
				break;

			default:
				console.log( child );

		}

	}

	library.geometries[ xml.getAttribute( 'id' ) ] = data;

}
// Parses a <source> element into a flat value array and its stride.
// The stride defaults to 3 unless the accessor specifies otherwise.
function parseSource( xml ) {

	const data = {
		array: [],
		stride: 3
	};

	for ( const child of xml.childNodes ) {

		if ( child.nodeType !== 1 ) continue;

		if ( child.nodeName === 'float_array' ) {

			data.array = parseFloats( child.textContent );

		} else if ( child.nodeName === 'Name_array' ) {

			data.array = parseStrings( child.textContent );

		} else if ( child.nodeName === 'technique_common' ) {

			const accessor = getElementsByTagName( child, 'accessor' )[ 0 ];

			if ( accessor !== undefined ) {

				data.stride = parseInt( accessor.getAttribute( 'stride' ) );

			}

		}

	}

	return data;

}
// Maps each vertex input's semantic (e.g. POSITION) to its source id.
function parseGeometryVertices( xml ) {

	const data = {};

	for ( const child of xml.childNodes ) {

		if ( child.nodeType === 1 ) {

			data[ child.getAttribute( 'semantic' ) ] = parseId( child.getAttribute( 'source' ) );

		}

	}

	return data;

}
// Describes one primitive element (<triangles>, <polylist>, <lines>, ...):
// its index stream, per-input offsets and the resulting index stride.
function parseGeometryPrimitive( xml ) {

	const primitive = {
		type: xml.nodeName,
		material: xml.getAttribute( 'material' ),
		count: parseInt( xml.getAttribute( 'count' ) ),
		inputs: {},
		stride: 0,
		hasUV: false
	};

	for ( const child of xml.childNodes ) {

		if ( child.nodeType !== 1 ) continue;

		if ( child.nodeName === 'input' ) {

			const id = parseId( child.getAttribute( 'source' ) );
			const semantic = child.getAttribute( 'semantic' );
			const offset = parseInt( child.getAttribute( 'offset' ) );
			const set = parseInt( child.getAttribute( 'set' ) );

			// inputs with a set > 0 get the set appended (e.g. TEXCOORD1)
			const inputname = ( set > 0 ? semantic + set : semantic );

			primitive.inputs[ inputname ] = { id: id, offset: offset };
			primitive.stride = Math.max( primitive.stride, offset + 1 );

			if ( semantic === 'TEXCOORD' ) primitive.hasUV = true;

		} else if ( child.nodeName === 'vcount' ) {

			primitive.vcount = parseInts( child.textContent );

		} else if ( child.nodeName === 'p' ) {

			primitive.p = parseInts( child.textContent );

		}

	}

	return primitive;

}
// Buckets the primitives by their type (triangles, polylist, lines, ...)
// so one geometry can be built per primitive type.
function groupPrimitives( primitives ) {

	const groups = {};

	for ( const primitive of primitives ) {

		const bucket = groups[ primitive.type ] || ( groups[ primitive.type ] = [] );
		bucket.push( primitive );

	}

	return groups;

}
// Flags a group of primitives when only some of them define texture
// coordinates: a mix would desynchronize the position and uv buffers,
// so the geometry builder has to pad missing uvs (see #3803).
function checkUVCoordinates( primitives ) {

	let withUV = 0;

	for ( const primitive of primitives ) {

		if ( primitive.hasUV === true ) withUV ++;

	}

	if ( withUV > 0 && withUV < primitives.length ) {

		primitives.uvsNeedsFix = true;

	}

}
// Builds one buffer geometry per primitive type found in the parsed
// geometry data (triangles, polylist, lines, ...).
function buildGeometry( data ) {

	const build = {};

	const sources = data.sources;
	const vertices = data.vertices;
	const primitives = data.primitives;

	if ( primitives.length === 0 ) return {};

	// first, group all primitives by their type
	const groupedPrimitives = groupPrimitives( primitives );

	for ( const type in groupedPrimitives ) {

		const primitiveType = groupedPrimitives[ type ];

		// ensure consistent uv coordinates within each group
		checkUVCoordinates( primitiveType );

		// then create a buffer geometry per type
		build[ type ] = buildGeometryType( primitiveType, sources, vertices );

	}

	return build;

}
// Builds one BufferGeometry for a group of primitives of the same type,
// de-indexing vertex data from the geometry's sources and adding one
// geometry group per primitive for multi-material support.
// Fix: the TEXCOORD1 branch inside the VERTEX input assigned uv.stride
// instead of uv2.stride (copy/paste bug), corrupting the uv attribute's
// item size whenever a second uv set was routed through <vertices>.
function buildGeometryType( primitives, sources, vertices ) {

	const build = {};

	const position = { array: [], stride: 0 };
	const normal = { array: [], stride: 0 };
	const uv = { array: [], stride: 0 };
	const uv2 = { array: [], stride: 0 };
	const color = { array: [], stride: 0 };

	const skinIndex = { array: [], stride: 4 };
	const skinWeight = { array: [], stride: 4 };

	const geometry = new BufferGeometry();

	const materialKeys = [];

	let start = 0;

	for ( let p = 0; p < primitives.length; p ++ ) {

		const primitive = primitives[ p ];
		const inputs = primitive.inputs;

		// groups: compute the number of generated vertices per primitive

		let count = 0;

		switch ( primitive.type ) {

			case 'lines':
			case 'linestrips':
				count = primitive.count * 2;
				break;

			case 'triangles':
				count = primitive.count * 3;
				break;

			case 'polylist':

				// polygons get triangulated, so count the fan triangles
				for ( let g = 0; g < primitive.count; g ++ ) {

					const vc = primitive.vcount[ g ];

					switch ( vc ) {

						case 3:
							count += 3; // single triangle
							break;

						case 4:
							count += 6; // quad, subdivided into two triangles
							break;

						default:
							count += ( vc - 2 ) * 3; // polylist with more than four vertices
							break;

					}

				}

				break;

			default:
				console.warn( 'THREE.ColladaLoader: Unknown primitive type:', primitive.type );

		}

		geometry.addGroup( start, count, p );
		start += count;

		// material

		if ( primitive.material ) {

			materialKeys.push( primitive.material );

		}

		// geometry data

		for ( const name in inputs ) {

			const input = inputs[ name ];

			switch ( name )	{

				case 'VERTEX':
					for ( const key in vertices ) {

						const id = vertices[ key ];

						switch ( key ) {

							case 'POSITION':
								const prevLength = position.array.length;
								buildGeometryData( primitive, sources[ id ], input.offset, position.array );
								position.stride = sources[ id ].stride;

								if ( sources.skinWeights && sources.skinIndices ) {

									buildGeometryData( primitive, sources.skinIndices, input.offset, skinIndex.array );
									buildGeometryData( primitive, sources.skinWeights, input.offset, skinWeight.array );

								}

								// see #3803: pad missing uvs so mixed primitive
								// groups keep position/uv buffers in sync
								if ( primitive.hasUV === false && primitives.uvsNeedsFix === true ) {

									const count = ( position.array.length - prevLength ) / position.stride;

									for ( let i = 0; i < count; i ++ ) {

										// fill missing uv coordinates
										uv.array.push( 0, 0 );

									}

								}

								break;

							case 'NORMAL':
								buildGeometryData( primitive, sources[ id ], input.offset, normal.array );
								normal.stride = sources[ id ].stride;
								break;

							case 'COLOR':
								buildGeometryData( primitive, sources[ id ], input.offset, color.array );
								color.stride = sources[ id ].stride;
								break;

							case 'TEXCOORD':
								buildGeometryData( primitive, sources[ id ], input.offset, uv.array );
								uv.stride = sources[ id ].stride;
								break;

							case 'TEXCOORD1':
								buildGeometryData( primitive, sources[ id ], input.offset, uv2.array );
								uv2.stride = sources[ id ].stride; // was uv.stride (copy/paste bug)
								break;

							default:
								console.warn( 'THREE.ColladaLoader: Semantic "%s" not handled in geometry build process.', key );

						}

					}

					break;

				case 'NORMAL':
					buildGeometryData( primitive, sources[ input.id ], input.offset, normal.array );
					normal.stride = sources[ input.id ].stride;
					break;

				case 'COLOR':
					// vertex colors are converted from sRGB to linear inside buildGeometryData
					buildGeometryData( primitive, sources[ input.id ], input.offset, color.array, true );
					color.stride = sources[ input.id ].stride;
					break;

				case 'TEXCOORD':
					buildGeometryData( primitive, sources[ input.id ], input.offset, uv.array );
					uv.stride = sources[ input.id ].stride;
					break;

				case 'TEXCOORD1':
					buildGeometryData( primitive, sources[ input.id ], input.offset, uv2.array );
					uv2.stride = sources[ input.id ].stride;
					break;

			}

		}

	}

	// build geometry

	if ( position.array.length > 0 ) geometry.setAttribute( 'position', new Float32BufferAttribute( position.array, position.stride ) );
	if ( normal.array.length > 0 ) geometry.setAttribute( 'normal', new Float32BufferAttribute( normal.array, normal.stride ) );
	if ( color.array.length > 0 ) geometry.setAttribute( 'color', new Float32BufferAttribute( color.array, color.stride ) );
	if ( uv.array.length > 0 ) geometry.setAttribute( 'uv', new Float32BufferAttribute( uv.array, uv.stride ) );
	if ( uv2.array.length > 0 ) geometry.setAttribute( 'uv2', new Float32BufferAttribute( uv2.array, uv2.stride ) );

	if ( skinIndex.array.length > 0 ) geometry.setAttribute( 'skinIndex', new Float32BufferAttribute( skinIndex.array, skinIndex.stride ) );
	if ( skinWeight.array.length > 0 ) geometry.setAttribute( 'skinWeight', new Float32BufferAttribute( skinWeight.array, skinWeight.stride ) );

	build.data = geometry;
	build.type = primitives[ 0 ].type;
	build.materialKeys = materialKeys;

	return build;

}
// De-indexes primitive data: for each index entry of the primitive, the
// referenced vector from 'source' is appended to 'array'. Polygons (via
// vcount) are triangulated (quads into two triangles, larger polygons as
// a fan). When isColor is set, each appended vector is converted from
// sRGB to linear in place.
// Fix: the start of the just-pushed vector is array.length - sourceStride;
// the previous '- 1' read and wrote one element too early, corrupting the
// vertex-color conversion (and writing out of bounds for the first vector).
function buildGeometryData( primitive, source, offset, array, isColor = false ) {

	const indices = primitive.p;
	const stride = primitive.stride;
	const vcount = primitive.vcount;

	const sourceArray = source.array;
	const sourceStride = source.stride;

	// Appends the vector referenced by the i-th index entry to 'array'.
	function pushVector( i ) {

		let index = indices[ i + offset ] * sourceStride;
		const length = index + sourceStride;

		for ( ; index < length; index ++ ) {

			array.push( sourceArray[ index ] );

		}

		if ( isColor ) {

			// convert the vertex colors from srgb to linear if present
			const startIndex = array.length - sourceStride; // was '- sourceStride - 1' (off by one)

			tempColor.setRGB(
				array[ startIndex + 0 ],
				array[ startIndex + 1 ],
				array[ startIndex + 2 ]
			).convertSRGBToLinear();

			array[ startIndex + 0 ] = tempColor.r;
			array[ startIndex + 1 ] = tempColor.g;
			array[ startIndex + 2 ] = tempColor.b;

		}

	}

	if ( primitive.vcount !== undefined ) {

		let index = 0;

		for ( let i = 0, l = vcount.length; i < l; i ++ ) {

			const count = vcount[ i ];

			if ( count === 4 ) {

				// quad: subdivide into two triangles (a, b, d) and (b, c, d)
				const a = index + stride * 0;
				const b = index + stride * 1;
				const c = index + stride * 2;
				const d = index + stride * 3;

				pushVector( a ); pushVector( b ); pushVector( d );
				pushVector( b ); pushVector( c ); pushVector( d );

			} else if ( count === 3 ) {

				const a = index + stride * 0;
				const b = index + stride * 1;
				const c = index + stride * 2;

				pushVector( a ); pushVector( b ); pushVector( c );

			} else if ( count > 4 ) {

				// larger polygons: fan triangulation around the first vertex
				for ( let k = 1, kl = ( count - 2 ); k <= kl; k ++ ) {

					const a = index + stride * 0;
					const b = index + stride * k;
					const c = index + stride * ( k + 1 );

					pushVector( a ); pushVector( b ); pushVector( c );

				}

			}

			index += stride * count;

		}

	} else {

		// plain index stream: one vertex per 'stride' index values
		for ( let i = 0, l = indices.length; i < l; i += stride ) {

			pushVector( i );

		}

	}

}
// Returns the (lazily built, cached) geometry for the given id.
function getGeometry( id ) {

	return getBuild( library.geometries[ id ], buildGeometry );

}
// kinematics
// Parses a <kinematics_model> element: joint definitions and link hierarchy.
function parseKinematicsModel( xml ) {

	const data = {
		name: xml.getAttribute( 'name' ) || '',
		joints: {},
		links: []
	};

	for ( const child of xml.childNodes ) {

		if ( child.nodeType !== 1 ) continue;

		if ( child.nodeName === 'technique_common' ) parseKinematicsTechniqueCommon( child, data );

	}

	library.kinematicsModels[ xml.getAttribute( 'id' ) ] = data;

}
// Kinematics models need no post-processing; reuse a cached build if present.
function buildKinematicsModel( data ) {

	return data.build !== undefined ? data.build : data;

}
// Returns the (lazily built, cached) kinematics model for the given id.
function getKinematicsModel( id ) {

	return getBuild( library.kinematicsModels[ id ], buildKinematicsModel );

}
// Collects the joints (by sid) and top-level links of a kinematics model.
function parseKinematicsTechniqueCommon( xml, data ) {

	for ( const child of xml.childNodes ) {

		if ( child.nodeType !== 1 ) continue;

		if ( child.nodeName === 'joint' ) {

			data.joints[ child.getAttribute( 'sid' ) ] = parseKinematicsJoint( child );

		} else if ( child.nodeName === 'link' ) {

			data.links.push( parseKinematicsLink( child ) );

		}

	}

}
// Parses a <joint> element; only prismatic and revolute joints are supported.
// Returns undefined for unsupported joint types.
function parseKinematicsJoint( xml ) {

	let data;

	for ( const child of xml.childNodes ) {

		if ( child.nodeType !== 1 ) continue;

		if ( child.nodeName === 'prismatic' || child.nodeName === 'revolute' ) {

			data = parseKinematicsJointParameter( child );

		}

	}

	return data;

}
// Parses a prismatic/revolute joint definition: its axis, motion limits
// and derived properties (static flag, middle position).
function parseKinematicsJointParameter( xml ) {

	const data = {
		sid: xml.getAttribute( 'sid' ),
		name: xml.getAttribute( 'name' ) || '',
		axis: new Vector3(),
		limits: {
			min: 0,
			max: 0
		},
		type: xml.nodeName,
		static: false,
		zeroPosition: 0,
		middlePosition: 0
	};

	for ( const child of xml.childNodes ) {

		if ( child.nodeType !== 1 ) continue;

		if ( child.nodeName === 'axis' ) {

			data.axis.fromArray( parseFloats( child.textContent ) );

		} else if ( child.nodeName === 'limits' ) {

			const max = child.getElementsByTagName( 'max' )[ 0 ];
			const min = child.getElementsByTagName( 'min' )[ 0 ];

			data.limits.max = parseFloat( max.textContent );
			data.limits.min = parseFloat( min.textContent );

		}

	}

	// if min is equal to or greater than max, consider the joint static
	if ( data.limits.min >= data.limits.max ) {

		data.static = true;

	}

	// calculate middle position
	data.middlePosition = ( data.limits.min + data.limits.max ) / 2.0;

	return data;

}
// Parses a <link> element of a kinematics model: its transforms and
// full attachments (which in turn may contain child links).
function parseKinematicsLink( xml ) {

	const data = {
		sid: xml.getAttribute( 'sid' ),
		name: xml.getAttribute( 'name' ) || '',
		attachments: [],
		transforms: []
	};

	for ( const child of xml.childNodes ) {

		if ( child.nodeType !== 1 ) continue;

		if ( child.nodeName === 'attachment_full' ) {

			data.attachments.push( parseKinematicsAttachment( child ) );

		} else if ( child.nodeName === 'matrix' || child.nodeName === 'translate' || child.nodeName === 'rotate' ) {

			data.transforms.push( parseKinematicsTransform( child ) );

		}

	}

	return data;

}
// Parses an <attachment_full> element: the joint it binds (last path
// segment of the 'joint' attribute), child links and transforms.
function parseKinematicsAttachment( xml ) {

	const data = {
		joint: xml.getAttribute( 'joint' ).split( '/' ).pop(),
		transforms: [],
		links: []
	};

	for ( const child of xml.childNodes ) {

		if ( child.nodeType !== 1 ) continue;

		if ( child.nodeName === 'link' ) {

			data.links.push( parseKinematicsLink( child ) );

		} else if ( child.nodeName === 'matrix' || child.nodeName === 'translate' || child.nodeName === 'rotate' ) {

			data.transforms.push( parseKinematicsTransform( child ) );

		}

	}

	return data;

}
// Parses a transform element (<matrix>, <translate> or <rotate>) into a
// three.js object; rotations additionally carry the angle in radians.
function parseKinematicsTransform( xml ) {

	const data = {
		type: xml.nodeName
	};

	const array = parseFloats( xml.textContent );

	if ( data.type === 'matrix' ) {

		// COLLADA matrices are row-major; transpose into three.js' layout
		data.obj = new Matrix4().fromArray( array ).transpose();

	} else if ( data.type === 'translate' ) {

		data.obj = new Vector3().fromArray( array );

	} else if ( data.type === 'rotate' ) {

		data.obj = new Vector3().fromArray( array );
		data.angle = MathUtils.degToRad( array[ 3 ] );

	}

	return data;

}
// physics
// Parses a <physics_model> element and its rigid bodies (keyed by name).
function parsePhysicsModel( xml ) {

	const data = {
		name: xml.getAttribute( 'name' ) || '',
		rigidBodies: {}
	};

	for ( const child of xml.childNodes ) {

		if ( child.nodeType !== 1 ) continue;

		if ( child.nodeName === 'rigid_body' ) {

			const name = child.getAttribute( 'name' );

			data.rigidBodies[ name ] = {};
			parsePhysicsRigidBody( child, data.rigidBodies[ name ] );

		}

	}

	library.physicsModels[ xml.getAttribute( 'id' ) ] = data;

}
function parsePhysicsRigidBody( xml, data ) {

	// Fills `data` from the <technique_common> child of a <rigid_body>.
	for ( const child of xml.childNodes ) {

		if ( child.nodeType !== 1 ) continue;

		if ( child.nodeName === 'technique_common' ) {

			parsePhysicsTechniqueCommon( child, data );

		}

	}

}
function parsePhysicsTechniqueCommon( xml, data ) {

	// Extracts inertia (vector) and mass (scalar) from <technique_common>.
	for ( const child of xml.childNodes ) {

		if ( child.nodeType !== 1 ) continue;

		if ( child.nodeName === 'inertia' ) {

			data.inertia = parseFloats( child.textContent );

		} else if ( child.nodeName === 'mass' ) {

			// mass is written as a single float
			data.mass = parseFloats( child.textContent )[ 0 ];

		}

	}

}
// scene
function parseKinematicsScene( xml ) {

	// Parses an <instance_kinematics_scene>: collects joint/axis bindings
	// and registers the scene (keyed by the referenced url) in the library.
	const data = {
		bindJointAxis: []
	};

	for ( const child of xml.childNodes ) {

		if ( child.nodeType !== 1 ) continue;

		if ( child.nodeName === 'bind_joint_axis' ) {

			data.bindJointAxis.push( parseKinematicsBindJointAxis( child ) );

		}

	}

	library.kinematicsScenes[ parseId( xml.getAttribute( 'url' ) ) ] = data;

}
function parseKinematicsBindJointAxis( xml ) {

	// Resolves which joint of the kinematics model a visual-scene transform
	// element is bound to.
	const data = {
		// the target attribute has the form "<scene>/<transform-sid>"
		target: xml.getAttribute( 'target' ).split( '/' ).pop()
	};

	for ( let i = 0; i < xml.childNodes.length; i ++ ) {

		const child = xml.childNodes[ i ];

		if ( child.nodeType !== 1 ) continue;

		switch ( child.nodeName ) {

			case 'axis': {

				// braces scope the case-local consts properly
				const param = child.getElementsByTagName( 'param' )[ 0 ];
				data.axis = param.textContent;

				// the axis name embeds the joint id between "inst_" and "axis";
				// drop the trailing separator character with slice (substr is deprecated)
				const tmpJointIndex = data.axis.split( 'inst_' ).pop().split( 'axis' )[ 0 ];
				data.jointIndex = tmpJointIndex.slice( 0, - 1 );

				break;

			}

		}

	}

	return data;

}
function buildKinematicsScene( data ) {

	// Kinematics scenes need no real build step: reuse the cached build if
	// present, otherwise the parsed data itself acts as the build result.
	return data.build !== undefined ? data.build : data;

}
function getKinematicsScene( id ) {

	// Looks up the parsed kinematics scene and builds it on demand.
	const data = library.kinematicsScenes[ id ];
	return getBuild( data, buildKinematicsScene );

}
// Connects the parsed kinematics model with the visual scene and exposes the
// result through the outer `kinematics` object (getJointValue/setJointValue).
function setupKinematics() {

	// only the first entry of each library is considered
	const kinematicsModelId = Object.keys( library.kinematicsModels )[ 0 ];
	const kinematicsSceneId = Object.keys( library.kinematicsScenes )[ 0 ];
	const visualSceneId = Object.keys( library.visualScenes )[ 0 ];

	if ( kinematicsModelId === undefined || kinematicsSceneId === undefined ) return;

	const kinematicsModel = getKinematicsModel( kinematicsModelId );
	const kinematicsScene = getKinematicsScene( kinematicsSceneId );
	const visualScene = getVisualScene( visualSceneId );

	const bindJointAxis = kinematicsScene.bindJointAxis;
	const jointMap = {};

	for ( let i = 0, l = bindJointAxis.length; i < l; i ++ ) {

		const axis = bindJointAxis[ i ];

		// the result of the following query is an element of type 'translate', 'rotate','scale' or 'matrix'
		const targetElement = collada.querySelector( '[sid="' + axis.target + '"]' );

		if ( targetElement ) {

			// get the parent of the transform element
			const parentVisualElement = targetElement.parentElement;

			// connect the joint of the kinematics model with the element in the visual scene
			connect( axis.jointIndex, parentVisualElement );

		}

	}

	// registers the scene object, its transform list and the joint's zero
	// position in jointMap under the joint index
	function connect( jointIndex, visualElement ) {

		const visualElementName = visualElement.getAttribute( 'name' );
		const joint = kinematicsModel.joints[ jointIndex ];

		visualScene.traverse( function ( object ) {

			if ( object.name === visualElementName ) {

				jointMap[ jointIndex ] = {
					object: object,
					transforms: buildTransformList( visualElement ),
					joint: joint,
					position: joint.zeroPosition
				};

			}

		} );

	}

	// scratch matrix reused for each individual transform below
	const m0 = new Matrix4();

	kinematics = {

		joints: kinematicsModel && kinematicsModel.joints,

		// returns the last value set for the joint (or its zero position)
		getJointValue: function ( jointIndex ) {

			const jointData = jointMap[ jointIndex ];

			if ( jointData ) {

				return jointData.position;

			} else {

				console.warn( 'THREE.ColladaLoader: Joint ' + jointIndex + ' doesn\'t exist.' );

			}

		},

		// applies a new value to the joint and recomputes the bound object's
		// local matrix; rejects out-of-limit values and static joints
		setJointValue: function ( jointIndex, value ) {

			const jointData = jointMap[ jointIndex ];

			if ( jointData ) {

				const joint = jointData.joint;

				if ( value > joint.limits.max || value < joint.limits.min ) {

					console.warn( 'THREE.ColladaLoader: Joint ' + jointIndex + ' value ' + value + ' outside of limits (min: ' + joint.limits.min + ', max: ' + joint.limits.max + ').' );

				} else if ( joint.static ) {

					console.warn( 'THREE.ColladaLoader: Joint ' + jointIndex + ' is static.' );

				} else {

					const object = jointData.object;
					const axis = joint.axis;
					const transforms = jointData.transforms;

					// `matrix` is the module-level scratch Matrix4
					matrix.identity();

					// each update, we have to apply all transforms in the correct order
					for ( let i = 0; i < transforms.length; i ++ ) {

						const transform = transforms[ i ];

						// if there is a connection of the transform node with a joint, apply the joint value
						if ( transform.sid && transform.sid.indexOf( jointIndex ) !== - 1 ) {

							switch ( joint.type ) {

								case 'revolute':
									matrix.multiply( m0.makeRotationAxis( axis, MathUtils.degToRad( value ) ) );
									break;

								case 'prismatic':
									matrix.multiply( m0.makeTranslation( axis.x * value, axis.y * value, axis.z * value ) );
									break;

								default:
									console.warn( 'THREE.ColladaLoader: Unknown joint type: ' + joint.type );
									break;

							}

						} else {

							// otherwise apply the node's static transform unchanged
							switch ( transform.type ) {

								case 'matrix':
									matrix.multiply( transform.obj );
									break;

								case 'translate':
									matrix.multiply( m0.makeTranslation( transform.obj.x, transform.obj.y, transform.obj.z ) );
									break;

								case 'scale':
									matrix.scale( transform.obj );
									break;

								case 'rotate':
									matrix.multiply( m0.makeRotationAxis( transform.obj, transform.angle ) );
									break;

							}

						}

					}

					object.matrix.copy( matrix );
					object.matrix.decompose( object.position, object.quaternion, object.scale );

					jointMap[ jointIndex ].position = value;

				}

			} else {

				console.log( 'THREE.ColladaLoader: ' + jointIndex + ' does not exist.' );

			}

		}

	};

}
// Collects the raw transform elements of a visual scene node (looked up by
// id in the document), keyed by sid, for use by setupKinematics().
function buildTransformList( node ) {

	const transforms = [];

	const xml = collada.querySelector( '[id="' + node.id + '"]' );

	for ( let i = 0; i < xml.childNodes.length; i ++ ) {

		const child = xml.childNodes[ i ];

		// skip text/comment nodes
		if ( child.nodeType !== 1 ) continue;

		let array, vector;

		switch ( child.nodeName ) {

			case 'matrix':
				array = parseFloats( child.textContent );
				// COLLADA is row-major, three.js column-major, hence the transpose
				const matrix = new Matrix4().fromArray( array ).transpose();
				transforms.push( {
					sid: child.getAttribute( 'sid' ),
					type: child.nodeName,
					obj: matrix
				} );
				break;

			case 'translate':
			case 'scale':
				array = parseFloats( child.textContent );
				vector = new Vector3().fromArray( array );
				transforms.push( {
					sid: child.getAttribute( 'sid' ),
					type: child.nodeName,
					obj: vector
				} );
				break;

			case 'rotate':
				array = parseFloats( child.textContent );
				vector = new Vector3().fromArray( array );
				// fourth component is the rotation angle in degrees
				const angle = MathUtils.degToRad( array[ 3 ] );
				transforms.push( {
					sid: child.getAttribute( 'sid' ),
					type: child.nodeName,
					obj: vector,
					angle: angle
				} );
				break;

		}

	}

	return transforms;

}
// nodes
function prepareNodes( xml ) {

	// Assigns a generated id to every <node> element that lacks one, so
	// nodes can later be looked up by id unconditionally.
	const nodes = xml.getElementsByTagName( 'node' );

	for ( let i = 0; i < nodes.length; i ++ ) {

		const node = nodes[ i ];

		if ( node.hasAttribute( 'id' ) === false ) node.setAttribute( 'id', generateId() );

	}

}
// scratch objects reused by parseNode() (and setupKinematics) to avoid
// per-call allocations — never hold state across calls
const matrix = new Matrix4();
const vector = new Vector3();
// Parses a <node> element into the node library. Child <node>s are parsed
// recursively; transform children are accumulated into a single local matrix
// in document order.
function parseNode( xml ) {

	const data = {
		name: xml.getAttribute( 'name' ) || '',
		type: xml.getAttribute( 'type' ),
		id: xml.getAttribute( 'id' ),
		sid: xml.getAttribute( 'sid' ),
		matrix: new Matrix4(),
		nodes: [],
		instanceCameras: [],
		instanceControllers: [],
		instanceLights: [],
		instanceGeometries: [],
		instanceNodes: [],
		transforms: {}
	};

	for ( let i = 0; i < xml.childNodes.length; i ++ ) {

		const child = xml.childNodes[ i ];

		// skip text/comment nodes
		if ( child.nodeType !== 1 ) continue;

		let array;

		switch ( child.nodeName ) {

			case 'node':
				// store only the id; the child registers itself in the library
				data.nodes.push( child.getAttribute( 'id' ) );
				parseNode( child );
				break;

			case 'instance_camera':
				data.instanceCameras.push( parseId( child.getAttribute( 'url' ) ) );
				break;

			case 'instance_controller':
				data.instanceControllers.push( parseNodeInstance( child ) );
				break;

			case 'instance_light':
				data.instanceLights.push( parseId( child.getAttribute( 'url' ) ) );
				break;

			case 'instance_geometry':
				data.instanceGeometries.push( parseNodeInstance( child ) );
				break;

			case 'instance_node':
				data.instanceNodes.push( parseId( child.getAttribute( 'url' ) ) );
				break;

			case 'matrix':
				array = parseFloats( child.textContent );
				// COLLADA matrices are row-major; transpose for three.js
				data.matrix.multiply( matrix.fromArray( array ).transpose() );
				data.transforms[ child.getAttribute( 'sid' ) ] = child.nodeName;
				break;

			case 'translate':
				array = parseFloats( child.textContent );
				vector.fromArray( array );
				data.matrix.multiply( matrix.makeTranslation( vector.x, vector.y, vector.z ) );
				data.transforms[ child.getAttribute( 'sid' ) ] = child.nodeName;
				break;

			case 'rotate':
				array = parseFloats( child.textContent );
				// axis in xyz, angle in degrees in the fourth component
				const angle = MathUtils.degToRad( array[ 3 ] );
				data.matrix.multiply( matrix.makeRotationAxis( vector.fromArray( array ), angle ) );
				data.transforms[ child.getAttribute( 'sid' ) ] = child.nodeName;
				break;

			case 'scale':
				array = parseFloats( child.textContent );
				data.matrix.scale( vector.fromArray( array ) );
				data.transforms[ child.getAttribute( 'sid' ) ] = child.nodeName;
				break;

			case 'extra':
				break;

			default:
				// unexpected element — log for diagnostics
				console.log( child );

		}

	}

	if ( hasNode( data.id ) ) {

		console.warn( 'THREE.ColladaLoader: There is already a node with ID %s. Exclude current node from further processing.', data.id );

	} else {

		library.nodes[ data.id ] = data;

	}

	return data;

}
function parseNodeInstance( xml ) {

	// Parses an <instance_geometry>/<instance_controller> element: the
	// referenced asset id, material-symbol bindings and skeleton roots.
	const data = {
		id: parseId( xml.getAttribute( 'url' ) ),
		materials: {},
		skeletons: []
	};

	for ( let i = 0; i < xml.childNodes.length; i ++ ) {

		const child = xml.childNodes[ i ];

		if ( child.nodeName === 'bind_material' ) {

			const instances = child.getElementsByTagName( 'instance_material' );

			for ( let j = 0; j < instances.length; j ++ ) {

				const instance = instances[ j ];

				// map the material symbol used by the geometry to the material id
				data.materials[ instance.getAttribute( 'symbol' ) ] = parseId( instance.getAttribute( 'target' ) );

			}

		} else if ( child.nodeName === 'skeleton' ) {

			data.skeletons.push( parseId( child.textContent ) );

		}

	}

	return data;

}
// Builds a three.js Skeleton from the controller's joint list and the
// skeleton root nodes referenced by a controller instance. Bone order is
// taken from the controller; extra bones are appended at the end.
function buildSkeleton( skeletons, joints ) {

	const boneData = [];
	const sortedBoneData = [];

	let i, j, data;

	// a skeleton can have multiple root bones. collada expresses this
	// situtation with multiple "skeleton" tags per controller instance
	for ( i = 0; i < skeletons.length; i ++ ) {

		const skeleton = skeletons[ i ];

		let root;

		if ( hasNode( skeleton ) ) {

			root = getNode( skeleton );
			buildBoneHierarchy( root, joints, boneData );

		} else if ( hasVisualScene( skeleton ) ) {

			// handle case where the skeleton refers to the visual scene (#13335)
			const visualScene = library.visualScenes[ skeleton ];
			const children = visualScene.children;

			for ( let j = 0; j < children.length; j ++ ) {

				const child = children[ j ];

				if ( child.type === 'JOINT' ) {

					const root = getNode( child.id );
					buildBoneHierarchy( root, joints, boneData );

				}

			}

		} else {

			console.error( 'THREE.ColladaLoader: Unable to find root bone of skeleton with ID:', skeleton );

		}

	}

	// sort bone data (the order is defined in the corresponding controller)
	for ( i = 0; i < joints.length; i ++ ) {

		for ( j = 0; j < boneData.length; j ++ ) {

			data = boneData[ j ];

			if ( data.bone.name === joints[ i ].name ) {

				sortedBoneData[ i ] = data;
				data.processed = true;
				break;

			}

		}

	}

	// add unprocessed bone data at the end of the list
	for ( i = 0; i < boneData.length; i ++ ) {

		data = boneData[ i ];

		if ( data.processed === false ) {

			sortedBoneData.push( data );
			data.processed = true;

		}

	}

	// setup arrays for skeleton creation
	const bones = [];
	const boneInverses = [];

	for ( i = 0; i < sortedBoneData.length; i ++ ) {

		data = sortedBoneData[ i ];

		bones.push( data.bone );
		boneInverses.push( data.boneInverse );

	}

	return new Skeleton( bones, boneInverses );

}
function buildBoneHierarchy( root, joints, boneData ) {

	// Walks the visual scene below `root` and registers every bone together
	// with its inverse bind matrix from the controller's joint list.
	root.traverse( function ( object ) {

		if ( object.isBone !== true ) return;

		let boneInverse;

		// retrieve the boneInverse from the controller data
		for ( let i = 0; i < joints.length; i ++ ) {

			if ( joints[ i ].name === object.name ) {

				boneInverse = joints[ i ].boneInverse;
				break;

			}

		}

		if ( boneInverse === undefined ) {

			// Joints in the visual scene that are not part of the controller get
			// an identity boneInverse. They influence no vertices (no skin indices
			// or weights reference them) but must be kept in the bone list so the
			// animation of the model stays correct.
			boneInverse = new Matrix4();

		}

		boneData.push( { bone: object, boneInverse: boneInverse, processed: false } );

	} );

}
// Builds an Object3D hierarchy for a parsed node: resolves child nodes and
// instanced cameras/controllers/lights/geometries/nodes, then wraps them in
// a Bone (for joints) or Group and applies the node's local matrix.
function buildNode( data ) {

	const objects = [];

	const matrix = data.matrix;
	const nodes = data.nodes;
	const type = data.type;
	const instanceCameras = data.instanceCameras;
	const instanceControllers = data.instanceControllers;
	const instanceLights = data.instanceLights;
	const instanceGeometries = data.instanceGeometries;
	const instanceNodes = data.instanceNodes;

	// nodes
	for ( let i = 0, l = nodes.length; i < l; i ++ ) {

		objects.push( getNode( nodes[ i ] ) );

	}

	// instance cameras
	for ( let i = 0, l = instanceCameras.length; i < l; i ++ ) {

		const instanceCamera = getCamera( instanceCameras[ i ] );

		if ( instanceCamera !== null ) {

			objects.push( instanceCamera.clone() );

		}

	}

	// instance controllers (skinned meshes bound to a skeleton)
	for ( let i = 0, l = instanceControllers.length; i < l; i ++ ) {

		const instance = instanceControllers[ i ];
		const controller = getController( instance.id );
		const geometries = getGeometry( controller.id );
		const newObjects = buildObjects( geometries, instance.materials );

		const skeletons = instance.skeletons;
		const joints = controller.skin.joints;

		const skeleton = buildSkeleton( skeletons, joints );

		for ( let j = 0, jl = newObjects.length; j < jl; j ++ ) {

			const object = newObjects[ j ];

			if ( object.isSkinnedMesh ) {

				object.bind( skeleton, controller.skin.bindMatrix );
				object.normalizeSkinWeights();

			}

			objects.push( object );

		}

	}

	// instance lights
	for ( let i = 0, l = instanceLights.length; i < l; i ++ ) {

		const instanceLight = getLight( instanceLights[ i ] );

		if ( instanceLight !== null ) {

			objects.push( instanceLight.clone() );

		}

	}

	// instance geometries
	for ( let i = 0, l = instanceGeometries.length; i < l; i ++ ) {

		const instance = instanceGeometries[ i ];

		// a single geometry instance in collada can lead to multiple object3Ds.
		// this is the case when primitives are combined like triangles and lines
		const geometries = getGeometry( instance.id );
		const newObjects = buildObjects( geometries, instance.materials );

		for ( let j = 0, jl = newObjects.length; j < jl; j ++ ) {

			objects.push( newObjects[ j ] );

		}

	}

	// instance nodes
	for ( let i = 0, l = instanceNodes.length; i < l; i ++ ) {

		objects.push( getNode( instanceNodes[ i ] ).clone() );

	}

	let object;

	if ( nodes.length === 0 && objects.length === 1 ) {

		// collapse the hierarchy when the node wraps exactly one object
		object = objects[ 0 ];

	} else {

		object = ( type === 'JOINT' ) ? new Bone() : new Group();

		for ( let i = 0; i < objects.length; i ++ ) {

			object.add( objects[ i ] );

		}

	}

	// joints are addressed by their sid, regular nodes by their name
	object.name = ( type === 'JOINT' ) ? data.sid : data.name;
	object.matrix.copy( matrix );
	object.matrix.decompose( object.position, object.quaternion, object.scale );

	return object;

}
// magenta fallback shared by resolveMaterialBinding() when a binding is missing
const fallbackMaterial = new MeshBasicMaterial( { color: 0xff00ff } );
function resolveMaterialBinding( keys, instanceMaterials ) {

	// Maps the geometry's material symbols to built materials; unresolved
	// symbols fall back to the shared magenta material.
	const materials = [];

	for ( const key of keys ) {

		const id = instanceMaterials[ key ];

		if ( id === undefined ) {

			console.warn( 'THREE.ColladaLoader: Material with key %s not found. Apply fallback material.', key );
			materials.push( fallbackMaterial );

		} else {

			materials.push( getMaterial( id ) );

		}

	}

	return materials;

}
function buildObjects( geometries, instanceMaterials ) {

	// Builds one renderable object per primitive type (lines, linestrips,
	// triangles, polylist) of a parsed geometry, resolving material bindings.
	const objects = [];

	for ( const type in geometries ) {

		const geometry = geometries[ type ];

		const materials = resolveMaterialBinding( geometry.materialKeys, instanceMaterials );

		// handle case if no materials are defined
		if ( materials.length === 0 ) {

			if ( type === 'lines' || type === 'linestrips' ) {

				materials.push( new LineBasicMaterial() );

			} else {

				materials.push( new MeshPhongMaterial() );

			}

		}

		// regard skinning
		const skinning = ( geometry.data.attributes.skinIndex !== undefined );

		// choose between a single or multi materials (material array)
		const material = ( materials.length === 1 ) ? materials[ 0 ] : materials;

		// now create a specific 3D object
		let object;

		switch ( type ) {

			case 'lines':
				object = new LineSegments( geometry.data, material );
				break;

			case 'linestrips':
				object = new Line( geometry.data, material );
				break;

			case 'triangles':
			case 'polylist':
				if ( skinning ) {

					object = new SkinnedMesh( geometry.data, material );

				} else {

					object = new Mesh( geometry.data, material );

				}

				break;

		}

		// guard against unknown primitive types: pushing undefined here would
		// make the caller's object.add() throw later
		if ( object === undefined ) {

			console.warn( 'THREE.ColladaLoader: Unknown primitive type "%s". Skipped.', type );
			continue;

		}

		objects.push( object );

	}

	return objects;

}
function hasNode( id ) {

	// True once parseNode() has registered a node under this id.
	const node = library.nodes[ id ];
	return node !== undefined;

}
function getNode( id ) {

	// Builds (or reuses the cached build of) the node with the given id.
	const data = library.nodes[ id ];
	return getBuild( data, buildNode );

}
// visual scenes
function parseVisualScene( xml ) {

	// Parses a <visual_scene>: ensures all nodes carry ids, parses each
	// top-level <node> and registers the scene (keyed by id) in the library.
	const data = {
		name: xml.getAttribute( 'name' ),
		children: []
	};

	prepareNodes( xml );

	const elements = getElementsByTagName( xml, 'node' );

	for ( let i = 0; i < elements.length; i ++ ) {

		const node = parseNode( elements[ i ] );
		data.children.push( node );

	}

	library.visualScenes[ xml.getAttribute( 'id' ) ] = data;

}
function buildVisualScene( data ) {

	// Assembles the scene graph: one Group containing the built top-level nodes.
	const group = new Group();
	group.name = data.name;

	for ( const child of data.children ) {

		group.add( getNode( child.id ) );

	}

	return group;

}
function hasVisualScene( id ) {

	// True once parseVisualScene() has registered a scene under this id.
	const scene = library.visualScenes[ id ];
	return scene !== undefined;

}
function getVisualScene( id ) {

	// Builds (or reuses the cached build of) the visual scene with the given id.
	const data = library.visualScenes[ id ];
	return getBuild( data, buildVisualScene );

}
// scenes
function parseScene( xml ) {

	// The <scene> element references exactly one visual scene to instantiate.
	const instance = getElementsByTagName( xml, 'instance_visual_scene' )[ 0 ];
	const sceneId = parseId( instance.getAttribute( 'url' ) );
	return getVisualScene( sceneId );

}
function setupAnimations() {

	// Fills the outer `animations` array: one clip per <animation_clip>, or a
	// single default clip built from all animations when no clips exist.
	const clips = library.clips;

	if ( isEmpty( clips ) === false ) {

		for ( const id in clips ) {

			animations.push( getAnimationClip( id ) );

		}

	} else if ( isEmpty( library.animations ) === false ) {

		// if there are animations but no clips, we create a default clip for playback
		const tracks = [];

		for ( const id in library.animations ) {

			tracks.push( ...getAnimation( id ) );

		}

		animations.push( new AnimationClip( 'default', - 1, tracks ) );

	}

}
// Flattens the browser's <parsererror> element into readable text: text
// nodes are concatenated, each element child starts a new line.
function parserErrorToText( parserError ) {

	let result = '';

	const pending = [ parserError ];

	while ( pending.length > 0 ) {

		const node = pending.shift();

		if ( node.nodeType === Node.TEXT_NODE ) {

			result += node.textContent;

		} else {

			result += '\n';
			pending.push( ...node.childNodes );

		}

	}

	return result.trim();

}
// --- main body of parse(): validate input, parse the XML document, run the
// two-pass parse/build over all libraries, then assemble the result object ---

// early out for empty documents
if ( text.length === 0 ) {

	return { scene: new Scene() };

}

const xml = new DOMParser().parseFromString( text, 'application/xml' );

const collada = getElementsByTagName( xml, 'COLLADA' )[ 0 ];

// surface XML parser errors with as much detail as the browser provides
const parserError = xml.getElementsByTagName( 'parsererror' )[ 0 ];
if ( parserError !== undefined ) {

	// Chrome will return parser error with a div in it
	const errorElement = getElementsByTagName( parserError, 'div' )[ 0 ];
	let errorText;

	if ( errorElement ) {

		errorText = errorElement.textContent;

	} else {

		errorText = parserErrorToText( parserError );

	}

	console.error( 'THREE.ColladaLoader: Failed to parse collada file.\n', errorText );

	return null;

}

// metadata
const version = collada.getAttribute( 'version' );
console.log( 'THREE.ColladaLoader: File version', version );

const asset = parseAsset( getElementsByTagName( collada, 'asset' )[ 0 ] );
const textureLoader = new TextureLoader( this.manager );
textureLoader.setPath( this.resourcePath || path ).setCrossOrigin( this.crossOrigin );

let tgaLoader;

// optional TGA support when a TGALoader implementation was provided
if ( TGALoader ) {

	tgaLoader = new TGALoader( this.manager );
	tgaLoader.setPath( this.resourcePath || path );

}

// shared state used by the parse/build helpers above
const tempColor = new Color();
const animations = [];
let kinematics = {};
let count = 0;

// raw parse results per asset category, filled by the parse* functions and
// consumed/cached by the build* functions
const library = {
	animations: {},
	clips: {},
	controllers: {},
	images: {},
	effects: {},
	materials: {},
	cameras: {},
	lights: {},
	geometries: {},
	nodes: {},
	visualScenes: {},
	kinematicsModels: {},
	physicsModels: {},
	kinematicsScenes: {}
};

// first pass: parse all libraries into plain data objects
parseLibrary( collada, 'library_animations', 'animation', parseAnimation );
parseLibrary( collada, 'library_animation_clips', 'animation_clip', parseAnimationClip );
parseLibrary( collada, 'library_controllers', 'controller', parseController );
parseLibrary( collada, 'library_images', 'image', parseImage );
parseLibrary( collada, 'library_effects', 'effect', parseEffect );
parseLibrary( collada, 'library_materials', 'material', parseMaterial );
parseLibrary( collada, 'library_cameras', 'camera', parseCamera );
parseLibrary( collada, 'library_lights', 'light', parseLight );
parseLibrary( collada, 'library_geometries', 'geometry', parseGeometry );
parseLibrary( collada, 'library_nodes', 'node', parseNode );
parseLibrary( collada, 'library_visual_scenes', 'visual_scene', parseVisualScene );
parseLibrary( collada, 'library_kinematics_models', 'kinematics_model', parseKinematicsModel );
parseLibrary( collada, 'library_physics_models', 'physics_model', parsePhysicsModel );
parseLibrary( collada, 'scene', 'instance_kinematics_scene', parseKinematicsScene );

// second pass: build three.js objects from the parsed data
buildLibrary( library.animations, buildAnimation );
buildLibrary( library.clips, buildAnimationClip );
buildLibrary( library.controllers, buildController );
buildLibrary( library.images, buildImage );
buildLibrary( library.effects, buildEffect );
buildLibrary( library.materials, buildMaterial );
buildLibrary( library.cameras, buildCamera );
buildLibrary( library.lights, buildLight );
buildLibrary( library.geometries, buildGeometry );
buildLibrary( library.visualScenes, buildVisualScene );

setupAnimations();
setupKinematics();

const scene = parseScene( getElementsByTagName( collada, 'scene' )[ 0 ] );
scene.animations = animations;

// convert Z-up assets to three.js' Y-up convention
if ( asset.upAxis === 'Z_UP' ) {

	scene.quaternion.setFromEuler( new Euler( - Math.PI / 2, 0, 0 ) );

}

scene.scale.multiplyScalar( asset.unit );

return {
	get animations() {

		console.warn( 'THREE.ColladaLoader: Please access animations over scene.animations now.' );
		return animations;

	},
	kinematics: kinematics,
	library: library,
	scene: scene
};
}
Example #29
Source File: LineSegments2.js From BlueMapWeb with MIT License | 4 votes |
LineSegments2.prototype = Object.assign( Object.create( Mesh.prototype ), {

	constructor: LineSegments2,

	isLineSegments2: true,

	computeLineDistances: ( function () { // for backwards-compatability, but could be a method of LineSegmentsGeometry...

		// scratch vectors shared across calls
		var start = new Vector3();
		var end = new Vector3();

		return function computeLineDistances() {

			var geometry = this.geometry;

			var instanceStart = geometry.attributes.instanceStart;
			var instanceEnd = geometry.attributes.instanceEnd;
			var lineDistances = new Float32Array( 2 * instanceStart.data.count );

			// accumulate segment lengths so each vertex pair stores the running
			// distance along the whole line
			for ( var i = 0, j = 0, l = instanceStart.data.count; i < l; i ++, j += 2 ) {

				start.fromBufferAttribute( instanceStart, i );
				end.fromBufferAttribute( instanceEnd, i );

				lineDistances[ j ] = ( j === 0 ) ? 0 : lineDistances[ j - 1 ];
				lineDistances[ j + 1 ] = lineDistances[ j ] + start.distanceTo( end );

			}

			var instanceDistanceBuffer = new InstancedInterleavedBuffer( lineDistances, 2, 1 ); // d0, d1

			geometry.setAttribute( 'instanceDistanceStart', new InterleavedBufferAttribute( instanceDistanceBuffer, 1, 0 ) ); // d0
			geometry.setAttribute( 'instanceDistanceEnd', new InterleavedBufferAttribute( instanceDistanceBuffer, 1, 1 ) ); // d1

			return this;

		};

	}() ),

	raycast: ( function () {

		// scratch objects shared across raycast calls
		var start = new Vector4();
		var end = new Vector4();

		var ssOrigin = new Vector4();
		var ssOrigin3 = new Vector3();
		var mvMatrix = new Matrix4();
		var line = new Line3();
		var closestPoint = new Vector3();

		// Raycasting is done in screen space because the line width is defined
		// in screen pixels by the material.
		return function raycast( raycaster, intersects ) {

			if ( raycaster.camera === null ) {

				console.error( 'LineSegments2: "Raycaster.camera" needs to be set in order to raycast against LineSegments2.' );

			}

			var threshold = ( raycaster.params.Line2 !== undefined ) ? raycaster.params.Line2.threshold || 0 : 0;

			var ray = raycaster.ray;
			var camera = raycaster.camera;
			var projectionMatrix = camera.projectionMatrix;

			var geometry = this.geometry;
			var material = this.material;
			var resolution = material.resolution;
			var lineWidth = material.linewidth + threshold;

			var instanceStart = geometry.attributes.instanceStart;
			var instanceEnd = geometry.attributes.instanceEnd;

			// pick a point 1 unit out along the ray to avoid the ray origin
			// sitting at the camera origin which will cause "w" to be 0 when
			// applying the projection matrix.
			ray.at( 1, ssOrigin );

			// ndc space [ - 1.0, 1.0 ]
			ssOrigin.w = 1;
			ssOrigin.applyMatrix4( camera.matrixWorldInverse );
			ssOrigin.applyMatrix4( projectionMatrix );
			ssOrigin.multiplyScalar( 1 / ssOrigin.w );

			// screen space
			ssOrigin.x *= resolution.x / 2;
			ssOrigin.y *= resolution.y / 2;
			ssOrigin.z = 0;

			ssOrigin3.copy( ssOrigin );

			var matrixWorld = this.matrixWorld;
			mvMatrix.multiplyMatrices( camera.matrixWorldInverse, matrixWorld );

			for ( var i = 0, l = instanceStart.count; i < l; i ++ ) {

				start.fromBufferAttribute( instanceStart, i );
				end.fromBufferAttribute( instanceEnd, i );

				start.w = 1;
				end.w = 1;

				// camera space
				start.applyMatrix4( mvMatrix );
				end.applyMatrix4( mvMatrix );

				// clip space
				start.applyMatrix4( projectionMatrix );
				end.applyMatrix4( projectionMatrix );

				// ndc space [ - 1.0, 1.0 ]
				start.multiplyScalar( 1 / start.w );
				end.multiplyScalar( 1 / end.w );

				// skip the segment if it's outside the camera near and far planes
				var isBehindCameraNear = start.z < - 1 && end.z < - 1;
				var isPastCameraFar = start.z > 1 && end.z > 1;
				if ( isBehindCameraNear || isPastCameraFar ) {

					continue;

				}

				// screen space
				start.x *= resolution.x / 2;
				start.y *= resolution.y / 2;
				end.x *= resolution.x / 2;
				end.y *= resolution.y / 2;

				// create 2d segment
				line.start.copy( start );
				line.start.z = 0;
				line.end.copy( end );
				line.end.z = 0;

				// get closest point on ray to segment
				var param = line.closestPointToPointParameter( ssOrigin3, true );
				line.at( param, closestPoint );

				// check if the intersection point is within clip space
				var zPos = MathUtils.lerp( start.z, end.z, param );
				var isInClipSpace = zPos >= - 1 && zPos <= 1;

				var isInside = ssOrigin3.distanceTo( closestPoint ) < lineWidth * 0.5;

				if ( isInClipSpace && isInside ) {

					// recompute the hit location in world space for the reported
					// intersection record
					line.start.fromBufferAttribute( instanceStart, i );
					line.end.fromBufferAttribute( instanceEnd, i );

					line.start.applyMatrix4( matrixWorld );
					line.end.applyMatrix4( matrixWorld );

					var pointOnLine = new Vector3();
					var point = new Vector3();

					ray.distanceSqToSegment( line.start, line.end, point, pointOnLine );

					intersects.push( {
						point: point,
						pointOnLine: pointOnLine,
						distance: ray.origin.distanceTo( point ),
						object: this,
						face: null,
						faceIndex: i,
						uv: null,
						uv2: null,
					} );

				}

			}

		};

	}() )

} );